source
stringlengths
3
92
c
stringlengths
26
2.25M
zgels.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_gels * * Solves overdetermined or underdetermined linear systems involving an m-by-n * matrix A, or its conjugate-transpose, using a QR or LQ factorization of A. * It is assumed that A has full rank. The following options are provided: * * # trans = PlasmaNoTrans and m >= n: find the least squares solution of an * overdetermined system, i.e., solve the least squares problem: * minimize || B - A*X ||. * * # trans = PlasmaNoTrans and m < n: find the minimum norm solution of an * underdetermined system A * X = B. * * # trans = Plasma_ConjTrans and m >= n: find the minimum norm solution of an * underdetermined system A^H * X = B. * * # trans = Plasma_ConjTrans and m < n: find the least squares solution of an * overdetermined system, i.e., solve the least squares problem: * minimize || B - A^H*X ||. * * Several right-hand side vectors B and solution vectors X can be handled in a * single call; they are stored as the columns of the m-by-nrhs right-hand side * matrix B and the n-by-nrhs solution matrix X. * ******************************************************************************* * * @param[in] trans * - PlasmaNoTrans: the linear system involves A * - Plasma_ConjTrans: the linear system involves A^H * * @param[in] m * The number of rows of the matrix A. m >= 0. * * @param[in] n * The number of columns of the matrix A. n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of columns of the * matrices B and X. nrhs >= 0. 
* * @param[in,out] pA * On entry, pointer to the m-by-n matrix A. * On exit, * if m >= n, A is overwritten by details of its QR factorization as * returned by plasma_zgeqrf; * if m < n, A is overwritten by details of its LQ factorization as * returned by plasma_zgelqf. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * On exit, auxiliary factorization data. * Matrix of T is allocated inside this function and needs to be * destroyed by plasma_desc_destroy. * * @param[in,out] pB * On entry, pointer to the m-by-nrhs matrix B of right-hand side * vectors, stored columnwise; * On exit, if return value = 0, B is overwritten by the solution * vectors, stored columnwise: * if trans = PlasmaNoTrans and m >= n, rows 1 to n of B contain the * least squares solution vectors; the residual sum of squares * for the solution in each column is given by the sum of * squares of the modulus of elements n+1 to m in that column; * if trans = PlasmaNoTrans and m < n, rows 1 to n of B contain the * minimum norm solution vectors; * if trans = Plasma_ConjTrans and m >= n, rows 1 to m of B contain the * minimum norm solution vectors; * if trans = Plasma_ConjTrans and m < n, rows 1 to m of B contain the * least squares solution vectors; the residual sum of squares * for the solution in each column is given by the sum of * squares of the modulus of elements M+1 to N in that column. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,m,n). 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************* * * @sa plasma_omp_zgels * @sa plasma_cgels * @sa plasma_dgels * @sa plasma_sgels * @sa plasma_zgeqrf * @sa plasma_zgeqrs * ******************************************************************************/ int plasma_zgels(plasma_enum_t trans, int m, int n, int nrhs, plasma_complex64_t *pA, int lda, plasma_desc_t *T, plasma_complex64_t *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((trans != PlasmaNoTrans) && (trans != Plasma_ConjTrans)) { plasma_error("illegal value of trans"); return PlasmaErrorIllegalValue; } if (m < 0) { plasma_error("illegal value of m"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -4; } if (lda < imax(1, m)) { plasma_error("illegal value of lda"); return -6; } if (ldb < imax(1, imax(m, n))) { plasma_error("illegal value of ldb"); return -9; } // quick return if (imin(m, imin(n, nrhs)) == 0) { for (int i = 0; i < imax(m, n); i++) for (int j = 0; j < nrhs; j++) pB[j*ldb+i] = 0.0; return PlasmaSuccess; } // Tune parameters. if (plasma->tuning) { if (m < n) plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n); else plasma_tune_geqrf(plasma, PlasmaComplexDouble, m, n); } // Set tiling parameters. int ib = plasma->ib; int nb = plasma->nb; plasma_enum_t householder_mode = plasma->householder_mode; // Create tile matrices. 
plasma_desc_t A; plasma_desc_t B; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, m, n, 0, 0, m, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, imax(m, n), nrhs, 0, 0, imax(m, n), nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Prepare descriptor T. retval = plasma_descT_create(A, ib, householder_mode, T); if (retval != PlasmaSuccess) { plasma_error("plasma_descT_create() failed"); return retval; } // Allocate workspace. plasma_workspace_t work; size_t lwork = nb + ib*nb; // geqrt/gelqt: tau + work retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble); if (retval != PlasmaSuccess) { plasma_error("plasma_workspace_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, &sequence, &request); plasma_omp_zge2desc(pB, ldb, B, &sequence, &request); // Call the tile async function. plasma_omp_zgels(trans, A, *T, B, work, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request); plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request); } // implicit synchronization plasma_workspace_destroy(&work); // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_gels * * Solves overdetermined or underdetermined linear * system of equations using the tile QR or the tile LQ factorization. 
* May return before the computation is finished. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] trans * - PlasmaNoTrans: the linear system involves A * - Plasma_ConjTrans: the linear system involves A^H * * @param[in,out] A * Descriptor of matrix A stored in the tile layout. * On exit, * if m >= n, A is overwritten by details of its QR factorization * as returned by plasma_zgeqrf; * if m < n, A is overwritten by details of its LQ factorization * as returned by plasma_zgelqf. * * @param[out] T * Descriptor of matrix T. * Auxiliary factorization data, computed by * plasma_zgeqrf or plasma_zgelqf. * * @param[in,out] B * Descriptor of matrix B. * On entry, right-hand side matrix B in the tile layout. * On exit, solution matrix X in the tile layout. * * @param[in] work * Workspace for the auxiliary arrays needed by some coreblas kernels. * For QR/LQ factorizations used in GELS, it contains preallocated * space for tau and work arrays. * Allocated by the plasma_workspace_create function. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. 
*
 *******************************************************************************
 *
 * @sa plasma_zgels
 * @sa plasma_omp_cgels
 * @sa plasma_omp_dgels
 * @sa plasma_omp_sgels
 *
 ******************************************************************************/
void plasma_omp_zgels(plasma_enum_t trans,
                      plasma_desc_t A, plasma_desc_t T, plasma_desc_t B,
                      plasma_workspace_t work,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Bug fix: sequence and request must be validated before any error path
    // hands them to plasma_request_fail(). Previously they were checked
    // last, after several plasma_request_fail(sequence, ...) calls, and the
    // NULL-sequence branch itself passed the NULL sequence to
    // plasma_request_fail().
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((trans != PlasmaNoTrans) && (trans != Plasma_ConjTrans)) {
        plasma_error("illegal value of trans");
        // Bug fix: report IllegalValue (as documented and as the synchronous
        // plasma_zgels() front end does), not NotSupported.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid descriptor T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid descriptor B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.n == 0) {
        // Zero matrix B.
        plasma_pzlaset(PlasmaGeneral, 0.0, 0.0, B, sequence, request);
        return;
    }

    //===============================
    // Solve using QR factorization.
    //===============================
    if (A.m >= A.n) {
        // Compute QR factorization of A.
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzgeqrf_tree(A, T, work, sequence, request);
        }
        else {
            plasma_pzgeqrf(A, T, work, sequence, request);
        }

        if (trans == PlasmaNoTrans) {
            // Find Y = Q^H * B.
            if (plasma->householder_mode == PlasmaTreeHouseholder) {
                plasma_pzunmqr_tree(PlasmaLeft, Plasma_ConjTrans,
                                    A, T, B, work, sequence, request);
            }
            else {
                plasma_pzunmqr(PlasmaLeft, Plasma_ConjTrans,
                               A, T, B, work, sequence, request);
            }

            // Solve R * X = Y.
            plasma_pztrsm(PlasmaLeft, PlasmaUpper,
                          PlasmaNoTrans, PlasmaNonUnit,
                          1.0, plasma_desc_view(A, 0, 0, A.n, A.n),
                               plasma_desc_view(B, 0, 0, A.n, B.n),
                          sequence, request);
        }
        else { // trans == Plasma_ConjTrans
            // Zero the trailing block of the right-hand-side matrix.
            // B has less rows than X.
            plasma_pzlaset(PlasmaGeneral, 0.0, 0.0,
                           plasma_desc_view(B, A.n, 0, A.m-A.n, B.n),
                           sequence, request);

            // Solve R^H * Y = B.
            plasma_pztrsm(PlasmaLeft, PlasmaUpper,
                          Plasma_ConjTrans, PlasmaNonUnit,
                          1.0, plasma_desc_view(A, 0, 0, A.n, A.n),
                               plasma_desc_view(B, 0, 0, A.n, B.n),
                          sequence, request);

            // Find X = Q * Y.
            if (plasma->householder_mode == PlasmaTreeHouseholder) {
                plasma_pzunmqr_tree(PlasmaLeft, PlasmaNoTrans,
                                    A, T, B, work, sequence, request);
            }
            else {
                plasma_pzunmqr(PlasmaLeft, PlasmaNoTrans,
                               A, T, B, work, sequence, request);
            }
        }
    }
    //===============================
    // Solve using LQ factorization.
    //===============================
    else {
        // Compute LQ factorization of A.
        if (plasma->householder_mode == PlasmaTreeHouseholder) {
            plasma_pzgelqf_tree(A, T, work, sequence, request);
        }
        else {
            plasma_pzgelqf(A, T, work, sequence, request);
        }

        if (trans == PlasmaNoTrans) {
            // Zero the trailing block of the right-hand-side matrix.
            // B has less rows than X.
            plasma_pzlaset(PlasmaGeneral, 0.0, 0.0,
                           plasma_desc_view(B, A.m, 0, A.n-A.m, B.n),
                           sequence, request);

            // Solve L * Y = B.
            plasma_pztrsm(PlasmaLeft, PlasmaLower,
                          PlasmaNoTrans, PlasmaNonUnit,
                          1.0, plasma_desc_view(A, 0, 0, A.m, A.m),
                               plasma_desc_view(B, 0, 0, A.m, B.n),
                          sequence, request);

            // Find X = Q^H * Y.
            if (plasma->householder_mode == PlasmaTreeHouseholder) {
                plasma_pzunmlq_tree(PlasmaLeft, Plasma_ConjTrans,
                                    A, T, B, work, sequence, request);
            }
            else {
                plasma_pzunmlq(PlasmaLeft, Plasma_ConjTrans,
                               A, T, B, work, sequence, request);
            }
        }
        else { // trans == Plasma_ConjTrans
            // Find Y = Q * B.
            if (plasma->householder_mode == PlasmaTreeHouseholder) {
                plasma_pzunmlq_tree(PlasmaLeft, PlasmaNoTrans,
                                    A, T, B, work, sequence, request);
            }
            else {
                plasma_pzunmlq(PlasmaLeft, PlasmaNoTrans,
                               A, T, B, work, sequence, request);
            }

            // Solve L^H * X = Y.
            plasma_pztrsm(PlasmaLeft, PlasmaLower,
                          Plasma_ConjTrans, PlasmaNonUnit,
                          1.0, plasma_desc_view(A, 0, 0, A.m, A.m),
                               plasma_desc_view(B, 0, 0, A.m, B.n),
                          sequence, request);
        }
    }
}
image-view.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                       MagickCore Image View Methods                         %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 March 2003                                  %
%                                                                             %
%  Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/MagickCore.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"

/*
  Typedef declarations.

  An ImageView associates an image with a cache view and the rectangular
  extent its iterators traverse.
*/
struct _ImageView
{
  char
    *description;    /* label; owned — destroyed in DestroyImageView() */

  RectangleInfo
    extent;          /* region the iterator methods walk, scanline by scanline */

  Image
    *image;          /* associated image; not destroyed with the view */

  CacheView
    *view;           /* pixel cache view; owned — destroyed in DestroyImageView() */

  ExceptionInfo
    *exception;      /* per-view exception sink; owned */

  MagickBooleanType
    debug;

  size_t
    signature;       /* MagickCoreSignature while the view is valid */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e I m a g e V i e w                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageView() makes a copy of the specified image view.
%
%  The format of the CloneImageView method is:
%
%      ImageView *CloneImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    Bug fix: the image pointer was never copied, so it stayed NULL after
    ResetMagickMemory() and GetImageViewImage()/the iterator methods then
    operated on a NULL image for cloned views.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y I m a g e V i e w                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageView() deallocates memory associated with a image view.
%
%  The format of the DestroyImageView method is:
%
%      ImageView *DestroyImageView(ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  /*
    Release owned resources: the description string, the cache view, and the
    exception.  The associated image is NOT destroyed here — the view never
    owns it.
  */
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  image_view->signature=(~MagickCoreSignature);  /* invalidate before free */
  image_view=(ImageView *) RelinquishMagickMemory(image_view);
  return(image_view);  /* always NULL; allows p=DestroyImageView(p) idiom */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferImageViewIterator() iterates over three image views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination image view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
%        const ImageView *duplex,ImageView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferImageViewIterator method is:
%
%      MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
%        ImageView *duplex,ImageView *destination,
%        DuplexTransferImageViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
% % o duplex: the duplex image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType DuplexTransferImageViewIterator( ImageView *source,ImageView *duplex,ImageView *destination, DuplexTransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (DuplexTransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict duplex_pixels, *magick_restrict pixels; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { 
status=MagickFalse; continue; } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DuplexTransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticMetacontent() returns the image view authentic % meta-content. % % The format of the GetImageViewAuthenticPixels method is: % % void *GetImageViewAuthenticMetacontent( % const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport void *GetImageViewAuthenticMetacontent( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticMetacontent(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticPixels() returns the image view authentic pixels. % % The format of the GetImageViewAuthenticPixels method is: % % Quantum *GetImageViewAuthenticPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
% */ MagickExport Quantum *GetImageViewAuthenticPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewException() returns the severity, reason, and description of any % error that occurs when utilizing a image view. % % The format of the GetImageViewException method is: % % char *GetImageViewException(const PixelImage *image_view, % ExceptionType *severity) % % A description of each parameter follows: % % o image_view: the pixel image_view. % % o severity: the severity of the error is returned here. % */ MagickExport char *GetImageViewException(const ImageView *image_view, ExceptionType *severity) { char *description; assert(image_view != (const ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); assert(severity != (ExceptionType *) NULL); *severity=image_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent, sizeof(*description)); if (description == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *description='\0'; if (image_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->reason), MagickPathExtent); if (image_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MagickPathExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->description), MagickPathExtent); (void) ConcatenateMagickString(description,")",MagickPathExtent); } return(description); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w E x t e n t                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewExtent() returns the image view extent.
%
%  The format of the GetImageViewExtent method is:
%
%      RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(image_view->extent);  /* returned by value; caller may not mutate the view */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewImage() returns the image associated with the image view.
%
%  The format of the GetImageViewImage method is:
%
%      Image *GetImageViewImage(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(image_view->image);  /* borrowed pointer; the view does not own it */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w I t e r a t o r                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewIterator() iterates over the image view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const ImageView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetImageViewIterator method is:
%
%      MagickBooleanType GetImageViewIterator(ImageView *source,
%        GetImageViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): the loop below runs y from extent.y to extent.height-1,
    i.e. extent.height is used as an exclusive end row rather than a row
    count; the height passed to magick_threads is derived the same way.
  */
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    /* a failed scanline poisons status; later iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualMetacontent() returns the image view virtual
%  meta-content.
%
%  The format of the GetImageViewVirtualMetacontent method is:
%
%      const void *GetImageViewVirtualMetacontent(
%        const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const void *GetImageViewVirtualMetacontent(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewVirtualMetacontent(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l P i x e l s                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualPixels() returns the image view virtual pixels.
%
%  The format of the GetImageViewVirtualPixels method is:
%
%      const Quantum *GetImageViewVirtualPixels(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const Quantum *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewVirtualPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e V i e w                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageView() returns MagickTrue if the parameter is verified as a image
%  view object.
%
%  The format of the IsImageView method is:
%
%      MagickBooleanType IsImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
  /*
    A valid view is non-NULL and carries the MagickCore signature.
  */
  if ((image_view == (const ImageView *) NULL) ||
      (image_view->signature != MagickCoreSignature))
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w I m a g e V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageView() returns a image view required for all other methods in the
%  Image View API.
%
%  The format of the NewImageView method is:
%
%      ImageView *NewImageView(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception)
{
  ImageView
    *view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  view=(ImageView *) AcquireMagickMemory(sizeof(*view));
  if (view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(view,0,sizeof(*view));
  view->description=ConstantString("ImageView");
  /*
    Bind the image first, then acquire a virtual cache view on it; the
    extent defaults to the full canvas.
  */
  view->image=image;
  view->view=AcquireVirtualCacheView(view->image,exception);
  view->extent.x=0;
  view->extent.y=0;
  view->extent.width=image->columns;
  view->extent.height=image->rows;
  view->exception=AcquireExceptionInfo();
  view->debug=IsEventLogging();
  view->signature=MagickCoreSignature;
  return(view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w I m a g e V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageViewRegion() returns a image view required for all other methods
%  in the Image View API.
%
%  The format of the NewImageViewRegion method is:
%
%      ImageView *NewImageViewRegion(Image *image,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a extent of
%      pixel_wands view.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x, const ssize_t y,const size_t width,const size_t height, ExceptionInfo *exception) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view)); if (image_view == (ImageView *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->view=AcquireVirtualCacheView(image_view->image,exception); image_view->image=image; image_view->extent.width=width; image_view->extent.height=height; image_view->extent.x=x; image_view->extent.y=y; image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewDescription() associates a description with an image view. % % The format of the SetImageViewDescription method is: % % void SetImageViewDescription(ImageView *image_view, % const char *description) % % A description of each parameter follows: % % o image_view: the image view. % % o description: the image view description. 
% */ MagickExport void SetImageViewDescription(ImageView *image_view, const char *description) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewIterator() iterates over the image view in parallel and calls % your set method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. The pixels are initiallly % undefined and any settings you make in the callback method are automagically % synced back to your image. % % The callback signature is: % % MagickBooleanType SetImageViewMethod(ImageView *destination, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback set method that must be % executed by a single thread at a time. % % The format of the SetImageViewIterator method is: % % MagickBooleanType SetImageViewIterator(ImageView *destination, % SetImageViewMethod set,void *context) % % A description of each parameter follows: % % o destination: the image view. % % o set: the set callback method. % % o context: the user defined context. 
% */ MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination, SetImageViewMethod set,void *context) { Image *destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (ImageView *) NULL); assert(destination->signature == MagickCoreSignature); if (set == (SetImageViewMethod) NULL) return(MagickFalse); destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=destination->extent.height-destination->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(destination_image,destination_image,height,1) #endif for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register Quantum *magick_restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x, y,destination->extent.width,1,destination->exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } if (set(destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetImageViewIterator) #endif proceed=SetImageProgress(destination_image,destination->description, progress++,destination->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f e r I m a g e V i e w I t e r a t o r 
% % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransferImageViewIterator() iterates over two image views in parallel and % calls your transfer method for each scanline of the view. The source pixel % extent is not confined to the image canvas-- that is you can include % negative offsets or widths or heights that exceed the image dimension. % However, the destination image view is confined to the image canvas-- that % is no negative offsets or widths or heights that exceed the image dimension % are permitted. % % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const ImageView *source, % ImageView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the TransferImageViewIterator method is: % % MagickBooleanType TransferImageViewIterator(ImageView *source, % ImageView *destination,TransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
% */ MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source, ImageView *destination,TransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (TransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict pixels; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { status=MagickFalse; continue; } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransferImageViewIterator) 
#endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateImageViewIterator() iterates over the image view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateImageViewIterator method is: % % MagickBooleanType UpdateImageViewIterator(ImageView *source, % UpdateImageViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /*
    Pixels updated by the callback are synced back to the image, so the
    image must be promoted to DirectClass before any rows are touched.
  */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Sync result is held in a separate variable so that a failure reported
      by the update callback above is not masked by a successful sync; this
      matches SetImageViewIterator() and TransferImageViewIterator().
    */
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/* ===== pngquant.c ===== */
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** © 2009-2019 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ char *PNGQUANT_USAGE = "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. 
See man page for full list of options.\n"; #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) #include <fcntl.h> /* O_BINARY */ #include <io.h> /* setmode() */ #include <locale.h> /* UTF-8 locale */ #else #include <unistd.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update; ./configure; or add -Ilib to compiler flags */ #include "pngquant_opts.h" char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (September 2021)"; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif void pngquant_internal_print_config(FILE *fd) { fputs("" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , fd); fflush(fd); } FILE *pngquant_c_stderr() { return stderr; } FILE *pngquant_c_stdout() { return stdout; } static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION); pngquant_internal_print_config(fd); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE 
*fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq); static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq); #ifndef PNGQUANT_NO_MAIN int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering .strip = false, }; pngquant_error retval = pngquant_parse_options(argc, argv, &options); if (retval != SUCCESS) { return retval; } if (options.print_version) { puts(PNGQUANT_VERSION); return SUCCESS; } if (options.missing_arguments) { print_full_version(stderr); print_usage(stderr); return MISSING_ARGUMENT; } if (options.print_help) { print_full_version(stdout); print_usage(stdout); return SUCCESS; } #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif liq_attr *liq = liq_attr_create(); if (!liq) { fputs("SSE-capable CPU is required for 
this build.\n", stderr); return WRONG_ARCHITECTURE; } if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) { fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } if (options.iebug) { // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(liq, 238); fputs(" warning: the workaround for IE6 is deprecated\n", stderr); } if (options.verbose) { liq_set_log_callback(liq, log_callback, NULL); options.log_callback = log_callback; } if (options.last_index_transparent) { liq_set_last_index_transparent(liq, true); } if (options.speed >= 10) { options.fast_compression = true; if (options.speed == 11) { options.floyd = 0; options.speed = 10; } } if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } if (options.extension && options.output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (options.extension == NULL) { options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png"; } if (options.output_file_path && options.num_files != 1) { fputs(" error: Only one input file is allowed when --output is used. 
This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (options.using_stdout && !options.using_stdin && options.num_files != 1) { fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (!options.num_files && !options.using_stdin) { fputs("No input files specified.\n", stderr); if (options.verbose) { print_full_version(stderr); } print_usage(stderr); return MISSING_ARGUMENT; } retval = pngquant_main_internal(&options, liq); liq_attr_destroy(liq); return retval; } #endif // Don't use this. This is not a public API. pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq) { if (options->map_file) { png24_image tmp = {.width=0}; if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) { fprintf(stderr, " error: unable to load %s", options->map_file); return INVALID_ARGUMENT; } liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image); const liq_palette *pal = liq_get_palette(tmp_quantize); if (!pal) { fprintf(stderr, " error: unable to read colors from %s", options->map_file); return INVALID_ARGUMENT; } for(unsigned int i=0; i < pal->count; i++) { liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]); } liq_result_destroy(tmp_quantize); } #ifdef _OPENMP // if there's a lot of files, coarse parallelism can be used if (options->num_files > 2*omp_get_max_threads()) { omp_set_nested(0); omp_set_dynamic(1); } else { omp_set_nested(1); } #endif unsigned int error_count=0, skipped_count=0, file_count=0; pngquant_error latest_error=SUCCESS; #pragma omp parallel for \ schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error) for(int i=0; i < options->num_files; i++) { const 
char *filename = options->using_stdin ? "stdin" : options->files[i]; struct pngquant_options opts = *options; liq_attr *local_liq = liq_attr_copy(liq); #ifdef _OPENMP struct buffered_log buf = {0}; if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) { liq_set_log_callback(local_liq, log_callback_buferred, &buf); liq_set_log_flush_callback(local_liq, log_callback_buferred_flush, &buf); opts.log_callback = log_callback_buferred; opts.log_callback_user_info = &buf; } #endif pngquant_error retval = SUCCESS; const char *outname = opts.output_file_path; char *outname_free = NULL; if (!opts.using_stdout) { if (!outname) { outname = outname_free = add_filename_extension(filename, opts.extension); } if (!opts.force && file_exists(outname)) { fprintf(stderr, " error: '%s' exists; not overwriting\n", outname); retval = NOT_OVERWRITING_ERROR; } } if (SUCCESS == retval) { retval = pngquant_file_internal(filename, outname, &opts, local_liq); } free(outname_free); liq_attr_destroy(local_liq); if (retval) { #pragma omp critical { latest_error = retval; } if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) { skipped_count++; } else { error_count++; } } ++file_count; } if (error_count) { verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.", error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (skipped_count) { verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.", skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (!skipped_count && !error_count) { verbose_printf(liq, options, "Quantized %d image%s.", file_count, (file_count == 1)? "" : "s"); } if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image); return latest_error; } /// Don't hack this. 
Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq) { pngquant_error retval = SUCCESS; verbose_printf(liq, options, "%s:", filename); liq_image *input_image = NULL; png24_image input_image_rwpng = {.width=0}; bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout if (SUCCESS == retval) { retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose); } int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap png8_image output_image = {.width=0}; if (SUCCESS == retval) { verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL); if (RWPNG_ICCP == input_image_rwpng.input_color) { verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace"); } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) { verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace"); } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) { verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace"); } else if (RWPNG_COCOA == input_image_rwpng.input_color) { // No comment } else if (RWPNG_SRGB == input_image_rwpng.input_color) { verbose_printf(liq, options, " passing sRGB tag from the input"); } else if (input_image_rwpng.gamma != 0.45455) { verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2", 1.0/input_image_rwpng.gamma); } // when using image as source of a fixed palette the palette is extracted using regular quantization liq_result *remap; liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? 
options->fixed_palette_image : input_image, liq, &remap); if (LIQ_OK == remap_error) { // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2 // NB: can't change gamma here, because output_color is allowed to be an sRGB tag liq_set_output_gamma(remap, 0.45455); liq_set_dithering_level(remap, options->floyd); retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image); if (SUCCESS == retval) { if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) { retval = OUT_OF_MEMORY_ERROR; } set_palette(remap, &output_image); double palette_error = liq_get_quantization_error(remap); if (palette_error >= 0) { quality_percent = liq_get_quantization_quality(remap); verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent); } } liq_result_destroy(remap); } else if (LIQ_QUALITY_TOO_LOW == remap_error) { retval = TOO_LOW_QUALITY; } else { retval = INVALID_ARGUMENT; // dunno } } if (SUCCESS == retval) { if (options->skip_if_larger) { // this is very rough approximation, but generally avoid losing more quality than is gained in file size. // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss. // but >50% savings are considered always worthwhile in order to allow low quality conversions to work at all const double quality = quality_percent/100.0; const double expected_reduced_size = pow(quality, 1.5); output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 
0.5 : expected_reduced_size); } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options, liq); if (TOO_LARGE_FILE == retval) { verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } if (SUCCESS == retval && output_image.metadata_size > 0) { verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq); if (write_retval) { retval = write_retval; } } if (input_image) liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); output_image->num_palette = palette->count; for(unsigned int i=0; i < palette->count; i++) { const liq_color px = palette->entries[i]; output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a}; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if ((outfile ) != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strcpy(outname, 
filename); if (x > 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0)) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ? 1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(liq, options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(liq, options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(liq, options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(liq, options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; #pragma omp critical (libpng) { if (output_image) { retval = 
rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, strip, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); output_image->output_color = output_color; /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc((size_t)output_image->height * (size_t)output_image->width); output_image->row_pointers = malloc((size_t)output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
/* ===== atomic_messages.c ===== */
// NOTE: clang -verify diagnostics test for '#pragma omp atomic'. Every
// expected-error/expected-note comment below is load-bearing test input;
// the @+N offsets are relative to the comment's own line.
// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp -fopenmp-version=50 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp-simd -fopenmp-version=50 -ferror-limit 100 %s -Wuninitialized

void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp atomic read
  argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

int foo() {
L1:
  foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
    goto L1;
  }
  goto L2;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
  L2:
    foo();
  }
  return 0;
}

struct S {
  int a;
};

int readint() {
  int a = 0, b = 0;
  // Test for atomic read
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected lvalue expression}}
  a = 0;
#pragma omp atomic read
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
  a = b;
  return 0;
}

int readS() {
  struct S a, b;
  // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}}
#pragma omp atomic read read allocate(a)
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
  a = b;
  return a.a;
}

int writeint() {
  int a = 0, b = 0;
  // Test for atomic write
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic write
  a = 0;
#pragma omp atomic write
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  a = b;
  return 0;
}

int writeS() {
  struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
  a = b;
  return a.a;
}

int updateint() {
  int a = 0, b = 0;
  // Test for atomic update
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
  foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary operator}}
  a = b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = b || a;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = a && b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  a = (float)a + b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  a = 2 * b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  a = b + *&a;
#pragma omp atomic update
  *&a = *&a + 2;
#pragma omp atomic update
  a++;
#pragma omp atomic
  ++a;
#pragma omp atomic update
  a--;
#pragma omp atomic
  --a;
#pragma omp atomic update
  a += b;
#pragma omp atomic
  a %= b;
#pragma omp atomic update
  a *= b;
#pragma omp atomic
  a -= b;
#pragma omp atomic update
  a /= b;
#pragma omp atomic
  a &= b;
#pragma omp atomic update
  a ^= b;
#pragma omp atomic
  a |= b;
#pragma omp atomic update
  a <<= b;
#pragma omp atomic
  a >>= b;
#pragma omp atomic update
  a = b + a;
#pragma omp atomic
  a = a * b;
#pragma omp atomic update
  a = b - a;
#pragma omp atomic
  a = a / b;
#pragma omp atomic update
  a = b & a;
#pragma omp atomic
  a = a ^ b;
#pragma omp atomic update
  a = b | a;
#pragma omp atomic
  a = a << b;
#pragma omp atomic
  a = b >> a;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}}
#pragma omp atomic update update
  a /= b;
  return 0;
}

int captureint() {
  int a = 0, b = 0, c = 0;
  // Test for atomic capture
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected compound statement}}
  ;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  foo();
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
  a = b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = b || a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  b = a = a && b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
  a = b + *&a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
  { a = b; }
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
  {}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b;a = b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b; a = b || a;}
#pragma omp atomic capture
  {b = a; a = a && b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  b = a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  b = a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
  b = a = b + *&a;
#pragma omp atomic capture
  c = *&a = *&a + 2;
#pragma omp atomic capture
  c = a++;
#pragma omp atomic capture
  c = ++a;
#pragma omp atomic capture
  c = a--;
#pragma omp atomic capture
  c = --a;
#pragma omp atomic capture
  c = a += b;
#pragma omp atomic capture
  c = a %= b;
#pragma omp atomic capture
  c = a *= b;
#pragma omp atomic capture
  c = a -= b;
#pragma omp atomic capture
  c = a /= b;
#pragma omp atomic capture
  c = a &= b;
#pragma omp atomic capture
  c = a ^= b;
#pragma omp atomic capture
  c = a |= b;
#pragma omp atomic capture
  c = a <<= b;
#pragma omp atomic capture
  c = a >>= b;
#pragma omp atomic capture
  c = a = b + a;
#pragma omp atomic capture
  c = a = a * b;
#pragma omp atomic capture
  c = a = b - a;
#pragma omp atomic capture
  c = a = a / b;
#pragma omp atomic capture
  c = a = b & a;
#pragma omp atomic capture
  c = a = a ^ b;
#pragma omp atomic capture
  c = a = b | a;
#pragma omp atomic capture
  c = a = a << b;
#pragma omp atomic capture
  c = a = b >> a;
#pragma omp atomic capture
  { c = *&a; *&a = *&a + 2;}
#pragma omp atomic capture
  { *&a = *&a + 2; c = *&a;}
#pragma omp atomic capture
  {c = a; a++;}
#pragma omp atomic capture
  {c = a; (a)++;}
#pragma omp atomic capture
  {++a;c = a;}
#pragma omp atomic capture
  {c = a;a--;}
#pragma omp atomic capture
  {--a;c = a;}
#pragma omp atomic capture
  {c = a; a += b;}
#pragma omp atomic capture
  {c = a; (a) += b;}
#pragma omp atomic capture
  {a %= b; c = a;}
#pragma omp atomic capture
  {c = a; a *= b;}
#pragma omp atomic capture
  {a -= b;c = a;}
#pragma omp atomic capture
  {c = a; a /= b;}
#pragma omp atomic capture
  {a &= b; c = a;}
#pragma omp atomic capture
  {c = a; a ^= b;}
#pragma omp atomic capture
  {a |= b; c = a;}
#pragma omp atomic capture
  {c = a; a <<= b;}
#pragma omp atomic capture
  {a >>= b; c = a;}
#pragma omp atomic capture
  {c = a; a = b + a;}
#pragma omp atomic capture
  {a = a * b; c = a;}
#pragma omp atomic capture
  {c = a; a = b - a;}
#pragma omp atomic capture
  {a = a / b; c = a;}
#pragma omp atomic capture
  {c = a; a = b & a;}
#pragma omp atomic capture
  {a = a ^ b; c = a;}
#pragma omp atomic capture
  {c = a; a = b | a;}
#pragma omp atomic capture
  {a = a << b; c = a;}
#pragma omp atomic capture
  {c = a; a = b >> a;}
#pragma omp atomic capture
  {c = a; a = foo();}
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}}
#pragma omp atomic capture capture
  b = a /= b;
  return 0;
}

void hint() {
  int a = 0;
#pragma omp atomic hint // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected '(' after 'hint'}}
  a += 1;
#pragma omp atomic hint( // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(+ // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(a // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expression is not an integer constant expression}}
  a += 1;
#pragma omp atomic hint(a) // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} omp50-error {{expression is not an integer constant expression}}
  a += 1;
#pragma omp atomic hint(1) hint(1) // omp45-error 2 {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{directive '#pragma omp atomic' cannot contain more than one 'hint' clause}}
  a += 1;
}
/* ======== file: a.4.1.c ======== */
/* { dg-do run } */

#include <omp.h>

extern void abort (void);

/* Fill ipoints elements of x, starting at index istart, with the
   sentinel value used by the checker in main.  */
void
subdomain (float *x, int istart, int ipoints)
{
  int i;

  for (i = 0; i < ipoints; i++)
    x[istart + i] = 123.456;
}

/* Split the npoints elements of x evenly across the OpenMP team; each
   thread fills its own contiguous partition, the last thread also takes
   the division remainder.  */
void
sub (float *x, int npoints)
{
  int iam, nt, ipoints, istart;

#pragma omp parallel default(shared) private(iam,nt,ipoints,istart)
  {
    iam = omp_get_thread_num ();
    nt = omp_get_num_threads ();
    ipoints = npoints / nt;     /* size of partition */
    istart = iam * ipoints;     /* starting array index */
    if (iam == nt - 1)          /* last thread may do more */
      ipoints = npoints - istart;
    subdomain (x, istart, ipoints);
  }
}

/* Verify every element was written; the interval check allows for
   float rounding of the 123.456 literal.  */
int
main ()
{
  int i;
  float array[10000];

  sub (array, 10000);

  for (i = 0; i < 10000; i++)
    if (array[i] < 123.45 || array[i] > 123.46)
      abort ();

  return 0;
}
/* ======== file: OpenMPClause.h ======== */
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/ASTFwd.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. 
SourceLocation EndLoc; /// Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. 
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed for by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. 
SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. 
void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. 
  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Build 'allocator' clause with the given allocator.
  ///
  /// \param A Allocator.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Allocator(A) {}

  /// Build an empty clause.
  OMPAllocatorClause()
      : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns allocator.
  Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }

  child_range children() { return child_range(&Allocator, &Allocator + 1); }

  const_child_range children() const {
    return const_child_range(&Allocator, &Allocator + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_allocator;
  }
};

/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
  Expr *Allocator = nullptr;

  /// Position of the ':' delimiter in the clause.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc,
                                            LParenLoc, EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate,
                                            SourceLocation(), SourceLocation(),
                                            SourceLocation(), N) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc, Expr *Allocator,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Returns the allocator expression or nullptr, if no allocator is specified.
  Expr *getAllocator() const { return Allocator; }

  /// Returns the location of the ':' delimiter.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAllocateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_allocate;
  }
};

/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond),
        ColonLoc(ColonLoc), NameModifier(NameModifier),
        NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPIfClause()
      : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_if;
  }
};

/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, Stmt *HelperCond,
                 OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPFinalClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_final;
  }
};

/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of threads specified in the 'num_threads' clause.
  Stmt *NumThreads = nullptr;

  /// Set number of threads.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with number of threads \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }

  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_threads;
  }
};

/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_safelen;
  }
};

/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simdlen;
  }
};

/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num) {}

  /// Build an empty clause.
  explicit OMPCollapseClause()
      : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_collapse;
  }
};

/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'default' clause.
  llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPDefaultClause()
      : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) {
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  llvm::omp::DefaultKind getDefaultKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_default;
  }
};

/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'proc_bind' clause.
  llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPProcBindClause()
      : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  llvm::omp::ProcBindKind getProcBindKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_proc_bind;
  }
};

/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_address' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedAddressClause()
      : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_unified_address;
  }
};

/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_shared_memory' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedSharedMemoryClause()
      : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory;
  }
};

/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'reverse_offload' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReverseOffloadClause()
      : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reverse_offload;
  }
};

/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDynamicAllocatorsClause()
      : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators;
  }
};

/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// 'atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const {
    return KindKwLoc;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// Start location of the schedule ind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// Set the first schedule modifier. /// /// \param M Schedule modifier. void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// Set the second schedule modifier. /// /// \param M Schedule modifier. void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// Set location of the first schedule modifier. void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// Set location of the second schedule modifier. void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// Set schedule modifier location. /// /// \param M Schedule modifier location. void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. 
/// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. /// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. 
  SourceLocation getScheduleKindLoc() { return KindLoc; }

  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }

  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_schedule;
  }
};

/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Real number of loops.
  unsigned NumberOfLoops = 0;

  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}

  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'ordered' clause.
  ///
  /// \param C AST context.
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);

  /// Build an empty clause.
  static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  /// Set number of iterations for the specified loop.
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;

  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);
  /// Get loop counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_ordered;
  }
};

/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nowait;
  }
};

/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
/// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. OMPUntiedClause() : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. 
  OMPMergeableClause()
      : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_mergeable;
  }
};

/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause()
      : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
/// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {} /// Build an empty clause. OMPWriteClause() : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_write; } }; /// This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// Also, this class represents 'update' clause in '#pragma omp depobj' /// directive. /// /// \code /// #pragma omp depobj(a) update(in) /// \endcode /// In this example directive '#pragma omp depobj' has 'update' clause with 'in' /// dependence kind. class OMPUpdateClause final : public OMPClause, private llvm::TrailingObjects<OMPUpdateClause, SourceLocation, OpenMPDependClauseKind> { friend class OMPClauseReader; friend TrailingObjects; /// true if extended version of the clause for 'depobj' directive. bool IsExtended = false; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<SourceLocation>) const { // 2 locations: for '(' and argument location. return IsExtended ? 2 : 0; } /// Sets the the location of '(' in clause for 'depobj' directive. 
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }

  /// Sets the location of the argument in clause for 'depobj' directive.
  void setArgumentLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
  }

  /// Sets the dependence kind for the clause for 'depobj' directive.
  void setDependencyKind(OpenMPDependClauseKind DK) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<OpenMPDependClauseKind>() = DK;
  }

  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
                  bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
        IsExtended(IsExtended) {}

  /// Build an empty clause.
  OMPUpdateClause(bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
        IsExtended(IsExtended) {}

public:
  /// Creates clause for 'atomic' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc);

  /// Creates clause for 'depobj' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ArgumentLoc Location of the argument.
  /// \param DK Dependence kind.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ArgumentLoc,
                                 OpenMPDependClauseKind DK,
                                 SourceLocation EndLoc);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param IsExtended true if extended clause for 'depobj' directive must be
  /// created.
  static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);

  /// Checks if the clause is the extended clause for 'depobj' directive.
  bool isExtended() const { return IsExtended; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }

  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }

  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_capture;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
  }
};

/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};

/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcquireClause()
      : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acquire;
  }
};

/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build 'release' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReleaseClause()
      : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_release;
  }
};

/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build 'relaxed' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPRelaxedClause()
      : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_relaxed;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
                                           LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause. OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. 
/// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->children(); return const_child_range(Children.begin(), 
Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_firstprivate; } }; /// This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. class OMPLastprivateClause final : public OMPVarListClause<OMPLastprivateClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLastprivateClause, Expr *> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents private variables // (for arrays, single array element). // 3. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents original variables // (for arrays, single array element). // 4. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of final assignment performed by the // lastprivate clause. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Optional lastprivate kind, e.g. 'conditional', if specified by user. OpenMPLastprivateModifier LPKind; /// Optional location of the lasptrivate kind, if specified by user. 
SourceLocation LPKindLoc;
  /// Optional colon location, if specified by user.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
                       SourceLocation LPKindLoc, SourceLocation ColonLoc,
                       unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables (tail-allocated array #1, stored
  /// directly after the variable list).
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions (tail-allocated array #2).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (tail-allocated array #3).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (tail-allocated array #4).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

  /// Sets lastprivate kind.
  void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
  /// Sets location of the lastprivate kind.
  void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
  /// Sets colon symbol location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
         SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Lastprivate kind.
  OpenMPLastprivateModifier getKind() const { return LPKind; }
  /// Returns the location of the lastprivate kind.
  SourceLocation getKindLoc() const { return LPKindLoc; }
  /// Returns the location of the ':' symbol, if any.
  SourceLocation getColonLoc() const { return ColonLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPLastprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Returns an empty range: this clause reports no "used" children.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_lastprivate;
  }
};

/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPSharedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Returns an empty range: this clause reports no "used" children.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_shared;
  }
};

/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Reduction modifier.
  OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown;

  /// Reduction modifier location.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ModifierLoc, SourceLocation ColonLoc,
                     SourceLocation EndLoc,
                     OpenMPReductionClauseModifier Modifier, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
                                             SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets reduction modifier.
  void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; }

  /// Sets location of the modifier.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates (stored directly after the variable
  /// list in trailing storage).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper copy operations for inscan reductions.
  /// The form is: Temps[i] = LHS[i];
  void setInscanCopyOps(ArrayRef<Expr *> Ops);

  /// Get the list of helper inscan copy operations.
  MutableArrayRef<Expr *> getInscanCopyOps() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyOps() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

  /// Set list of helper temp vars for inscan copy array operations.
  void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);

  /// Get the list of helper inscan copy temps.
  MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
    return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
    return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
  }

  /// Set list of helper temp elements vars for inscan copy array operations.
  void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);

  /// Get the list of helper inscan copy array elements.
  MutableArrayRef<Expr *> getInscanCopyArrayElems() {
    return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getInscanCopyArrayElems() const {
    return llvm::makeArrayRef(getInscanCopyArrayTemps().end(),
                              varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param CopyOps List of copy operations for inscan reductions:
  /// \code
  /// TempExprs = LHSExprs;
  /// \endcode
  /// \param CopyArrayTemps Temp arrays for prefix sums.
  /// \param CopyArrayElems Temp arrays for prefix sums.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ModifierLoc, SourceLocation ColonLoc,
         SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier,
         ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
         ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param Modifier Reduction modifier.
  static OMPReductionClause *
  CreateEmpty(const ASTContext &C, unsigned N,
              OpenMPReductionClauseModifier Modifier);

  /// Returns modifier.
  OpenMPReductionClauseModifier getModifier() const { return Modifier; }

  /// Returns modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range copy_ops() const {
    return helper_expr_const_range(getInscanCopyOps().begin(),
                                   getInscanCopyOps().end());
  }
  helper_expr_range copy_ops() {
    return helper_expr_range(getInscanCopyOps().begin(),
                             getInscanCopyOps().end());
  }

  helper_expr_const_range copy_array_temps() const {
    return helper_expr_const_range(getInscanCopyArrayTemps().begin(),
                                   getInscanCopyArrayTemps().end());
  }
  helper_expr_range copy_array_temps() {
    return helper_expr_range(getInscanCopyArrayTemps().begin(),
                             getInscanCopyArrayTemps().end());
  }

  helper_expr_const_range copy_array_elems() const {
    return helper_expr_const_range(getInscanCopyArrayElems().begin(),
                                   getInscanCopyArrayElems().end());
  }
  helper_expr_range copy_array_elems() {
    return helper_expr_range(getInscanCopyArrayElems().begin(),
                             getInscanCopyArrayElems().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // All variables in the clause are reported as used children (the full
  // variable-list range).
  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates (stored directly after the variable
  /// list in trailing storage).
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Returns an empty range: this clause reports no "used" children.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(),
const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_task_reduction; } }; /// This represents clause 'in_reduction' in the '#pragma omp task' directives. /// /// \code /// #pragma omp task in_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp task' has clause 'in_reduction' with /// operator '+' and the variables 'a' and 'b'. class OMPInReductionClause final : public OMPVarListClause<OMPInReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPInReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInReductionClause(unsigned N) : OMPVarListClause<OMPInReductionClause>( llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. 
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause. Also, variables
  /// in these expressions are used for proper initialization of reduction
  /// copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps,
         ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit,
         Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }

  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
  }
};

/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the linear step for clause. The step is stored in the first helper
  /// slot directly past the finals list (see the layout comment on
  /// getPrivates() below).
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// Sets the expression to calculate linear step for clause.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
                                          LParenLoc, EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), NumVars),
        OMPClauseWithPostUpdate(this) {}

  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed
  /// by NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Sets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }

  /// Sets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }

  /// Gets the list of used expressions for linear variables.
  /// NOTE(review): starts at getFinals().end() + 2, i.e. past the Step and
  /// CalcStep helper slots, and holds varlist_size() + 1 entries.
  MutableArrayRef<Expr *> getUsedExprs() {
    return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
  }
  ArrayRef<const Expr *> getUsedExprs() const {
    return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
  }

  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);

  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);

public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

  /// Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }

  /// Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns linear step.
  Expr *getStep() { return *(getFinals().end()); }

  /// Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }

  /// Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }

  /// Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

  /// Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);

  /// Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);

  /// Sets the list of used expressions for the linear clause.
  void setUsedExprs(ArrayRef<Expr *> UE);

  using privates_iterator = MutableArrayRef<Expr *>::iterator;
  using privates_const_iterator = ArrayRef<const Expr *>::iterator;
  using privates_range = llvm::iterator_range<privates_iterator>;
  using privates_const_range = llvm::iterator_range<privates_const_iterator>;

  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }

  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  using updates_iterator = MutableArrayRef<Expr *>::iterator;
  using updates_const_iterator = ArrayRef<const Expr *>::iterator;
  using updates_range = llvm::iterator_range<updates_iterator>;
  using updates_const_range = llvm::iterator_range<updates_const_iterator>;

  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }

  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }

  using finals_iterator = MutableArrayRef<Expr *>::iterator;
  using finals_const_iterator = ArrayRef<const Expr *>::iterator;
  using finals_range = llvm::iterator_range<finals_iterator>;
  using finals_const_range = llvm::iterator_range<finals_const_iterator>;

  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }

  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }

  using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
  using used_expressions_const_iterator = ArrayRef<const Expr
*>::iterator; using used_expressions_range = llvm::iterator_range<used_expressions_iterator>; using used_expressions_const_range = llvm::iterator_range<used_expressions_const_iterator>; used_expressions_range used_expressions() { return finals_range(getUsedExprs().begin(), getUsedExprs().end()); } used_expressions_const_range used_expressions() const { return finals_const_range(getUsedExprs().begin(), getUsedExprs().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLinearClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPLinearClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_linear; } }; /// This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. class OMPAlignedClause final : public OMPVarListClause<OMPAlignedClause>, private llvm::TrailingObjects<OMPAlignedClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. 
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars) {} public: /// Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// Returns alignment. 
const Expr *getAlignment() const { return *varlist_end(); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAlignedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_aligned; } }; /// This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. class OMPCopyinClause final : public OMPVarListClause<OMPCopyinClause>, private llvm::TrailingObjects<OMPCopyinClause, Expr *> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
                                          LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions
  /// that assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyinClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyin;
  }
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
                                               StartLoc, LParenLoc, EndLoc,
                                               N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  /// NOTE(review): trailing storage holds, in order, the variable list,
  /// sources, destinations, and assignment ops; each helper range below
  /// starts where the previous one ends.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
                                         LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
                                         SourceLocation(), SourceLocation(),
                                         SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Depobj expression associated with the clause.
  Expr *Depobj = nullptr;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  ///
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(),
                  SourceLocation()) {}

  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Creates clause.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Depobj depobj expression associated with the 'depobj' directive.
  static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *Depobj);

  /// Creates an empty clause.
  ///
  /// \param C AST context.
  static OMPDepobjClause *CreateEmpty(const ASTContext &C);

  /// Returns depobj expression associated with the clause.
  Expr *getDepobj() { return Depobj; }
  const Expr *getDepobj() const { return Depobj; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  child_range children() {
    // The single depobj expression is this clause's only child.
    return child_range(reinterpret_cast<Stmt **>(&Depobj),
                       reinterpret_cast<Stmt **>(&Depobj) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDepobjClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depobj;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
                                          LParenLoc, EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  explicit OMPDependClause(unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N),
        NumLoops(NumLoops) {}

  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Sets optional dependency modifier.
  void setModifier(Expr *DepModifier);

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepModifier Optional dependency modifier expression.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *DepModifier,
                                 OpenMPDependClauseKind DepKind,
                                 SourceLocation DepLoc, SourceLocation ColonLoc,
                                 ArrayRef<Expr *> VL, unsigned NumLoops);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Return optional depend modifier.
  Expr *getModifier();
  const Expr *getModifier() const {
    return const_cast<OMPDependClause *>(this)->getModifier();
  }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDependClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depend;
  }
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device clause modifier.
  OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

  /// Sets modifier.
  void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }

  /// Sets modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

public:
  /// Build 'device' clause.
  ///
  /// \param Modifier Clause modifier.
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper statement passed to setPreInitStmt for the
  /// capture region.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param ModifierLoc Modifier location.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation ModifierLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
        ModifierLoc(ModifierLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  /// NOTE(review): returns a non-const Expr* from a const accessor, so
  /// callers could mutate the expression — confirm this is intentional.
  Expr *getDevice() const { return cast<Expr>(Device); }

  /// Gets modifier.
  OpenMPDeviceClauseModifier getModifier() const { return Modifier; }

  /// Gets modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(llvm::omp::OMPC_threads, SourceLocation(),
                  SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause()
      : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    /// Declaration associated with the declaration. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    // The stored declaration is always canonicalized so that components of
    // different expressions over the same entity compare equal.
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;
  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;
  /// Number of component lists.
  unsigned NumComponentLists;
  /// Total number of expression components.
  unsigned NumComponents;
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// Whether this clause is possible to have user-defined mappers associated.
  /// It should be true for map, to, and from clauses, and false for
  /// use_device_ptr and is_device_ptr.
  const bool SupportsMapper;

  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;

  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  /// \param SupportsMapper Indicates whether this clause is possible to have
  /// user-defined mappers associated.
  /// \param MapperQualifierLocPtr C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
  OMPMappableExprListClause(
      OpenMPClauseKind K, const OMPVarListLocTy &Locs,
      const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false,
      NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
      DeclarationNameInfo *MapperIdInfoPtr = nullptr)
      : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
                            Sizes.NumVars),
        NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
        NumComponentLists(Sizes.NumComponentLists),
        NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) {
    if (MapperQualifierLocPtr)
      MapperQualifierLoc = *MapperQualifierLocPtr;
    if (MapperIdInfoPtr)
      MapperIdInfo = *MapperIdInfoPtr;
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Set the unique declarations that are in the trailing objects of the
  /// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. 
void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Get the components that are in the trailing objects of the class. ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. 
assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. 
CI = std::copy(C.begin(), C.end(), CI); } } } /// Set the nested name specifier of associated user-defined mapper. void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) { MapperQualifierLoc = NNSL; } /// Set the name of associated user-defined mapper. void setMapperIdInfo(DeclarationNameInfo MapperId) { MapperIdInfo = MapperId; } /// Get the user-defined mapper references that are in the trailing objects of /// the class. MutableArrayRef<Expr *> getUDMapperRefs() { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeMutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Get the user-defined mappers references that are in the trailing objects /// of the class. ArrayRef<Expr *> getUDMapperRefs() const { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeArrayRef<Expr *>( static_cast<const T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Set the user-defined mappers that are in the trailing objects of the /// class. void setUDMapperRefs(ArrayRef<Expr *> DMDs) { assert(DMDs.size() == OMPVarListClause<T>::varlist_size() && "Unexpected number of user-defined mappers."); assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin()); } public: /// Return the number of unique base declarations in this clause. unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; } /// Return the number of lists derived from the clause expressions. unsigned getTotalComponentListNum() const { return NumComponentLists; } /// Return the total number of components in all lists derived from the /// clause. 
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }

  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Whether this clause is possible to have user-defined mappers associated.
    const bool SupportsMapper;

    // The user-defined mapper associated with the current declaration.
    ArrayRef<Expr *>::iterator MapperCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          SupportsMapper(SupportsMapper),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
      if (SupportsMapper)
        MapperCur = Mappers.begin();
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components, bool SupportsMapper,
        ArrayRef<Expr *> Mappers)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components,
                                         SupportsMapper, Mappers) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;

        if (SupportsMapper)
          ++MapperCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      const ValueDecl *Mapper = nullptr;
      if (SupportsMapper && *MapperCur)
        Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
      return std::make_tuple(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
          Mapper);
    }
    std::tuple<const ValueDecl *, MappableExprComponentListRef,
               const ValueDecl *>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          if (SupportsMapper)
            ++MapperCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }

      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator component_lists_end() const {
    // The end iterator is built over empty declaration/size arrays and an
    // empty component range anchored at the end of the components storage.
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()),
        SupportsMapper, llvm::None);
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
        SupportsMapper ? getUDMapperRefs() : llvm::None);
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  // NOTE: the mapperlist accessors go through getUDMapperRefs(), which asserts
  // SupportsMapper; only call them on clauses that can carry mappers.
  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This
represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    // Only 'to' and 'tofrom' maps feed values into the region, so only those
    // expressions count as used children.
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_thread_limit;
  }
};

/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(llvm::omp::OMPC_priority, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Grainsize = nullptr;

  /// Set safelen.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *NumTasks = nullptr;

  /// Set safelen.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression.
Expr *getHint() const { return cast_or_null<Expr>(Hint); } child_range children() { return child_range(&Hint, &Hint + 1); } const_child_range children() const { return const_child_range(&Hint, &Hint + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_hint; } }; /// This represents 'dist_schedule' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp distribute dist_schedule(static, 3) /// \endcode /// In this example directive '#pragma omp distribute' has 'dist_schedule' /// clause with arguments 'static' and '3'. class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'schedule' clause. OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown; /// Start location of the schedule kind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. 
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  // The only child is the chunk-size expression.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
        KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
                  SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  // 'defaultmap' carries no expressions, so there are no AST children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'to' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'to' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                       ArrayRef<SourceLocation> TheMotionModifiersLoc,
                       NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                             ArrayRef<SourceLocation> MotionModifiersLoc,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'from' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'from' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                         ArrayRef<SourceLocation> TheMotionModifiersLoc,
                         NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
                                  Sizes, /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
         ArrayRef<SourceLocation> MotionModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
  /// locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Trailing storage holds the variables, their private copies, and the
    // private-copy initializers: 3 arrays of varlist_size() expressions.
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
  }
};

/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
    : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
      private llvm::TrailingObjects<
          OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
                                  const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDeviceAddrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
  static OMPUseDeviceAddrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    // Empty range: this clause exposes no used children.
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with the given locations \a Locs and sizes \a Sizes.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // One expression per listed variable.
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // One counter per unique declaration plus one per component list.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
  OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
                                               StartLoc, LParenLoc, EndLoc,
                                               N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPNontemporalClause(unsigned N)
      : OMPVarListClause<OMPNontemporalClause>(
            llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Get the list of privatized copies if the member expression was captured
  /// by one of the privatization clauses.
  // The private copies are stored immediately after the variable list in the
  // trailing Expr * storage (range starts at varlist_end()).
  MutableArrayRef<Expr *> getPrivateRefs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateRefs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPNontemporalClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Sets the list of references to private copies created in private clauses.
  /// \param VL List of references.
  void setPrivateRefs(ArrayRef<Expr *> VL);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Range over the private-copy expressions (stored after the variable list).
  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }

  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
  }
};

/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'order' clause.
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPOrderClause() : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPOrderClauseKind getKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_order; } }; /// This represents 'destroy' clause in the '#pragma omp depobj' /// directive. /// /// \code /// #pragma omp depobj(a) destroy /// \endcode /// In this example directive '#pragma omp depobj' has 'destroy' clause. class OMPDestroyClause final : public OMPClause { public: /// Build 'destroy' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {} /// Build an empty clause. 
  OMPDestroyClause()
      : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_destroy;
  }
};

/// This represents 'detach' clause in the '#pragma omp task' directive.
///
/// \code
/// #pragma omp task detach(evt)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'detach' clause
/// with the variable 'evt'.
class OMPDetachClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression of the 'detach' clause.
  Stmt *Evt = nullptr;

  /// Set event-handler expression.
  void setEventHandler(Expr *E) { Evt = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'detach' clause with event-handler \a Evt.
  ///
  /// \param Evt Event handler expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Evt(Evt) {}

  /// Build an empty clause.
  OMPDetachClause()
      : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns event-handler expression.
  Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }

  // The event-handler expression is the single child of this clause.
  child_range children() { return child_range(&Evt, &Evt + 1); }
  const_child_range children() const {
    return const_child_range(&Evt, &Evt + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_detach;
  }
};

/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive'
/// with the variables 'a' and 'b'.
class OMPInclusiveClause final
    : public OMPVarListClause<OMPInclusiveClause>,
      private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInclusiveClause(unsigned N)
      : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPInclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  // The variable list doubles as the children of this clause.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_inclusive;
  }
};

/// This represents clause 'exclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan exclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'exclusive'
/// with the variables 'a' and 'b'.
class OMPExclusiveClause final
    : public OMPVarListClause<OMPExclusiveClause>,
      private llvm::TrailingObjects<OMPExclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             StartLoc, LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPExclusiveClause(unsigned N)
      : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
                                             SourceLocation(), SourceLocation(),
                                             SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  static OMPExclusiveClause *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPExclusiveClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    // Empty range: no used children for this clause.
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_exclusive;
  }
};

/// This represents clause 'uses_allocators' in the '#pragma omp target'-based
/// directives.
///
/// \code
/// #pragma omp target uses_allocators(default_allocator, my_allocator(traits))
/// \endcode
/// In this example directive '#pragma omp target' has clause 'uses_allocators'
/// with the allocators 'default_allocator' and user-defined 'my_allocator'.
class OMPUsesAllocatorsClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *,
                                    SourceLocation> {
public:
  /// Data for list of allocators.
  struct Data {
    /// Allocator.
    Expr *Allocator = nullptr;
    /// Allocator traits.
    Expr *AllocatorTraits = nullptr;
    /// Locations of '(' and ')' symbols.
    SourceLocation LParenLoc, RParenLoc;
  };

private:
  friend class OMPClauseReader;
  friend TrailingObjects;

  // Index layout of the per-allocator entries in the trailing Expr * array.
  enum class ExprOffsets {
    Allocator,
    AllocatorTraits,
    Total,
  };

  // Index layout of the per-allocator entries in the trailing location array.
  enum class ParenLocsOffsets {
    LParen,
    RParen,
    Total,
  };

  /// Location of '('.
  SourceLocation LParenLoc;
  /// Total number of allocators in the clause.
  unsigned NumOfAllocators = 0;

  /// Build clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of allocators associated with the clause.
  OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc, unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumOfAllocators(N) {}

  /// Build an empty clause.
  /// \param N Number of allocators associated with the clause.
  ///
  explicit OMPUsesAllocatorsClause(unsigned N)
      : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
                  SourceLocation()),
        NumOfAllocators(N) {}

  unsigned numTrailingObjects(OverloadToken<Expr *>) const {
    // Two expressions (allocator + traits) per allocator.
    return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
  }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Sets the allocators data for the clause.
  void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);

public:
  /// Creates clause with a list of allocators \p Data.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Data List of allocators.
  static OMPUsesAllocatorsClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);

  /// Creates an empty clause with the place for \p N allocators.
/// /// \param C AST context. /// \param N The number of allocators. static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of allocators associated with the clause. unsigned getNumberOfAllocators() const { return NumOfAllocators; } /// Returns data for the specified allocator. OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const; // Iterators child_range children() { Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>()); return child_range(Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } const_child_range children() const { Stmt *const *Begin = reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>()); return const_child_range( Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_uses_allocators; } }; /// This represents clause 'affinity' in the '#pragma omp task'-based /// directives. /// /// \code /// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i]) /// \endcode /// In this example directive '#pragma omp task' has clause 'affinity' with the /// affinity modifer 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]' /// and 'c[i]'. class OMPAffinityClause final : public OMPVarListClause<OMPAffinityClause>, private llvm::TrailingObjects<OMPAffinityClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':' symbol. SourceLocation ColonLoc; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. 
/// \param EndLoc Ending location of the clause. /// \param N Number of locators asssociated with the clause. OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// \param N Number of locators asssociated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the affinity modifier for the clause, if any. void setModifier(Expr *E) { getTrailingObjects<Expr *>()[varlist_size()] = E; } /// Sets the location of ':' symbol. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a modifier a list of locator items. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param Locators List of locator items. static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Creates an empty clause with the place for \p N locator items. /// /// \param C AST context. /// \param N The number of locator items. static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets affinity modifier. Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; } Expr *getModifier() const { return getTrailingObjects<Expr *>()[varlist_size()]; } /// Gets the location of ':' symbol. SourceLocation getColonLoc() const { return ColonLoc; } // Iterators child_range children() { int Offset = getModifier() ? 
1 : 0;
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end() + Offset));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAffinityClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_affinity;
  }
};

/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) Ptr<CLASS>
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  // One VisitXxxClause member per clause class, generated from OMPKinds.def.
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
  RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
  case llvm::omp::Clause::Enum: \
    return Visit##Class(static_cast<PTR(Class)>(S));
#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
  case llvm::omp::Clause::Enum: \
    break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
    default:
      break;
    }
    // NOTE(review): control can fall off the end here for clause kinds with
    // no class (and for 'default'); for a non-void RetTy that is undefined
    // behavior — confirm an llvm_unreachable/fallback return was not dropped.
  }
  // Base case, ignore it.
  // :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};

template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;

template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};
template <class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, const_ptr, RetTy> {};

class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  /// Destination stream for the printed clauses.
  raw_ostream &OS;
  /// Policy controlling the emitted syntax.
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
  /// Process motion clauses.
  template <typename T> void VisitOMPMotionClause(T *Node);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
  void Visit##Class(Class *S);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

struct OMPTraitProperty {
  llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;

  /// The raw string as we parsed it. This is needed for the `isa` trait set
  /// (which accepts anything) and (later) extensions.
  StringRef RawString;
};
struct OMPTraitSelector {
  Expr *ScoreOrCondition = nullptr;
  llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
  llvm::SmallVector<OMPTraitProperty, 1> Properties;
};
struct OMPTraitSet {
  llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
  llvm::SmallVector<OMPTraitSelector, 2> Selectors;
};

/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional score/condition,
/// and an ordered collection of properties.
class OMPTraitInfo {
  /// Private constructor accessible only by ASTContext.
  OMPTraitInfo() {}
  friend class ASTContext;

public:
  /// Reconstruct a (partial) OMPTraitInfo object from a mangled name.
  OMPTraitInfo(StringRef MangledName);

  /// The outermost level of selector sets.
  llvm::SmallVector<OMPTraitSet, 2> Sets;

  /// Returns true if \p Cond returns true for any score or condition
  /// expression of any selector. \p Cond receives the expression slot and a
  /// flag that is true unless the selector is a user-condition.
  bool anyScoreOrCondition(
      llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) {
    return llvm::any_of(Sets, [&](OMPTraitSet &Set) {
      return llvm::any_of(
          Set.Selectors, [&](OMPTraitSelector &Selector) {
            return Cond(Selector.ScoreOrCondition,
                        /* IsScore */ Selector.Kind !=
                            llvm::omp::TraitSelector::user_condition);
          });
    });
  }

  /// Create a variant match info object from this trait info object. While the
  /// former is a flat representation the actual main difference is that the
  /// latter uses clang::Expr to store the score/condition while the former is
  /// independent of clang. Thus, expressions and conditions are evaluated in
  /// this method.
  void getAsVariantMatchInfo(ASTContext &ASTCtx,
                             llvm::omp::VariantMatchInfo &VMI) const;

  /// Return a string representation identifying this context selector.
  std::string getMangledName() const;

  /// Print a human readable representation into \p OS.
  void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);

/// Clang specific specialization of the OMPContext to lookup target features.
struct TargetOMPContext final : public llvm::omp::OMPContext {
  TargetOMPContext(ASTContext &ASTCtx,
                   std::function<void(StringRef)> &&DiagUnknownTrait,
                   const FunctionDecl *CurrentFunctionDecl);
  virtual ~TargetOMPContext() = default;

  /// See llvm::omp::OMPContext::matchesISATrait
  bool matchesISATrait(StringRef RawString) const override;

private:
  // Validity predicate for ISA trait strings — see constructor definition for
  // how it is initialized.
  std::function<bool(StringRef)> FeatureValidityCheck;
  // Callback used to diagnose unknown traits.
  std::function<void(StringRef)> DiagUnknownTrait;
  // Per-feature lookup table; presumably feature name -> enabled state —
  // confirm in the definition.
  llvm::StringMap<bool> FeatureMap;
};

/// Contains data for OpenMP directives: clauses, children
/// expressions/statements (helpers for codegen) and associated statement, if
/// any.
class OMPChildren final
    : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> {
  friend TrailingObjects;
  friend class OMPClauseReader;
  friend class OMPExecutableDirective;
  template <typename T> friend class OMPDeclarativeDirective;

  /// Numbers of clauses.
  unsigned NumClauses = 0;
  /// Number of child expressions/stmts.
  unsigned NumChildren = 0;
  /// true if the directive has associated statement.
  bool HasAssociatedStmt = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<OMPClause *>) const {
    return NumClauses;
  }

  OMPChildren() = delete;

  OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt)
      : NumClauses(NumClauses), NumChildren(NumChildren),
        HasAssociatedStmt(HasAssociatedStmt) {}

  static size_t size(unsigned NumClauses, bool HasAssociatedStmt,
                     unsigned NumChildren);

  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses);
  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S,
                             unsigned NumChildren = 0);
  static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses,
                                  bool HasAssociatedStmt = false,
                                  unsigned NumChildren = 0);

public:
  unsigned getNumClauses() const { return NumClauses; }
  unsigned getNumChildren() const { return NumChildren; }
  bool hasAssociatedStmt() const { return HasAssociatedStmt; }

  /// Set associated statement.
  // The associated statement lives one slot past the child statements in the
  // trailing Stmt * storage.
  void setAssociatedStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[NumChildren] = S;
  }

  void setChildren(ArrayRef<Stmt *> Children);

  /// Sets the list of clauses for this directive.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPChildren *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    return getTrailingObjects<Stmt *>()[NumChildren];
  }

  /// Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(),
                                     NumClauses);
  }
  ArrayRef<OMPClause *> getClauses() const {
    return const_cast<OMPChildren *>(this)->getClauses();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
const CapturedStmt * getCapturedStmt(OpenMPDirectiveKind RegionKind, ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { assert(llvm::any_of( CaptureRegions, [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) { assert(hasAssociatedStmt() && "Must have associated captured statement."); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt( CaptureRegions); } MutableArrayRef<Stmt *> getChildren(); ArrayRef<Stmt *> getChildren() const { return const_cast<OMPChildren *>(this)->getChildren(); } Stmt *getRawStmt() { assert(HasAssociatedStmt && "Expected directive with the associated statement."); if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) { Stmt *S = nullptr; do { S = CS->getCapturedStmt(); CS = dyn_cast<CapturedStmt>(S); } while (CS); return S; } return getAssociatedStmt(); } const Stmt *getRawStmt() const { return const_cast<OMPChildren *>(this)->getRawStmt(); } Stmt::child_range getAssociatedStmtAsRange() { if (!HasAssociatedStmt) return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator()); return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren], &getTrailingObjects<Stmt *>()[NumChildren + 
1]); } }; } // namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
convolutiondepthwise_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static inline signed char float2int8(float v) { int int32 = static_cast<int>(round(v)); if (int32 > 127) return 127; if (int32 < -127) return -127; return (signed char)int32; } static void convdw3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0++; r1++; r2++; 
outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } static void convdw3x3s1_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const int32_t *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); float *outptr = out; const float scale_dequant = scales_dequant[p]; //const float bias0 = bias ? bias[p] * scale_dequant : 0.f; const int32_t bias0 = bias ? 
bias[p] : 0; //out.fill(bias0); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; //*outptr += (float)sum * scale_dequant; *outptr += (sum + bias0) * scale_dequant; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_dequant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; const int32_t *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); float *outptr = out; const float scale_dequant = scales_dequant[p]; //const float bias0 = bias ? bias[p] * scale_dequant : 0.f; const int32_t bias0 = bias ? 
bias[p] : 0; //out.fill(bias0); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; //*outptr += (float)sum * scale_dequant; *outptr += (sum + bias0) * scale_dequant; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } static void convdw3x3s1_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; const float *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); signed char *outptr = out; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out); r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option &opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; const float *bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); signed char *outptr = out; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; const signed char *kernel0 = (const signed char *)kernel + p * 9; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out); r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
while-task.c
/* Compile-only regression test: an OpenMP 'task' construct appearing as
 * the sole body of a 'while' loop.  The pragma plus its structured block
 * must be parsed as the single statement controlled by the loop, which
 * exercises pragma handling in statement (not block) position.
 * foo() is never meant to be executed (the loop would not terminate). */
void foo()
{
  while (1)
#pragma omp task
    {
    }
}
bitfield-1.c
/* Test that bit-field members of structs mapped to an OpenMP target
 * region can be read and written inside the region.  S2..S5 cover
 * uniform, shrinking, mixed-signedness, and wide (up to 55-bit)
 * bit-fields.
 *
 * NOTE(review): the regions use map(to:) yet the host asserts observe
 * writes made inside them; this relies on host-fallback / shared-memory
 * execution -- confirm that is the intended test configuration. */
#include <assert.h>

/* Assign 1..10 to members a1..a10 of variable s<N>. */
#define ASSIGN_SX(N) \
  s##N.a1 = 1; \
  s##N.a2 = 2; \
  s##N.a3 = 3; \
  s##N.a4 = 4; \
  s##N.a5 = 5; \
  s##N.a6 = 6; \
  s##N.a7 = 7; \
  s##N.a8 = 8; \
  s##N.a9 = 9; \
  s##N.a10 = 10;

/* Check that members a1..a10 of variable s<N> hold 1..10. */
#define ASSERT_SX(N) \
  assert (s##N.a1 == 1); \
  assert (s##N.a2 == 2); \
  assert (s##N.a3 == 3); \
  assert (s##N.a4 == 4); \
  assert (s##N.a5 == 5); \
  assert (s##N.a6 == 6); \
  assert (s##N.a7 == 7); \
  assert (s##N.a8 == 8); \
  assert (s##N.a9 == 9); \
  assert (s##N.a10 == 10);

struct S1
{
  unsigned a : 10;
  unsigned b : 20;
};

struct S2
{
  unsigned a1 : 10;
  unsigned a2 : 10;
  unsigned a3 : 10;
  unsigned a4 : 10;
  unsigned a5 : 10;
  unsigned a6 : 10;
  unsigned a7 : 10;
  unsigned a8 : 10;
  unsigned a9 : 10;
  unsigned a10 : 10;
};

struct S3
{
  unsigned a1 : 10;
  unsigned a2 : 9;
  unsigned a3 : 8;
  unsigned a4 : 7;
  unsigned a5 : 6;
  unsigned a6 : 5;
  unsigned a7 : 6;
  unsigned a8 : 7;
  unsigned a9 : 8;
  unsigned a10 : 9;
};

struct S4
{
  unsigned a1 : 10;
  int a2 : 9;
  unsigned a3 : 8;
  int a4 : 7;
  unsigned a5 : 6;
  int a6 : 5;
  unsigned a7 : 6;
  int a8 : 7;
  unsigned a9 : 8;
  int a10 : 9;
};

struct S5
{
  unsigned a1 : 31;
  int a2 : 9;
  unsigned a3 : 17;
  int a4 : 7;
  unsigned a5 : 6;
  int a6 : 5;
  unsigned long a7 : 55;
  int a8 : 7;
  unsigned a9 : 8;
  int a10 : 9;
};

int
main ()
{
  struct S1 s1;
#pragma omp target map(to: s1)
  {
    s1.a = 2;
    s1.b = 3;
  }
  assert (s1.a == 2);
  assert (s1.b == 3);

  struct S2 s2;
#pragma omp target map(to: s2)
  {
    ASSIGN_SX (2)
  }
  ASSERT_SX (2)

  struct S3 s3;
#pragma omp target map(to: s3)
  {
    ASSIGN_SX (3)
  }
  ASSERT_SX (3)

  struct S4 s4;
#pragma omp target map(to: s4)
  {
    ASSIGN_SX (4)
  }
  ASSERT_SX (4)

  /* Fixed: this variable was declared 'struct S4 s5;', leaving struct S5
   * (the only struct with a wide 55-bit unsigned long field) completely
   * untested.  It must be S5 for the test to exercise those layouts.  */
  struct S5 s5;
  s5.a1 = 0;
  s5.a2 = 1;
  s5.a3 = 2;
  s5.a4 = 3;
  s5.a5 = 4;
  s5.a6 = 5;
  s5.a7 = 6;
  s5.a8 = 7;
  s5.a9 = 8;
  s5.a10 = 9;
#pragma omp target map(to: s5)
  {
    s5.a1++;
    s5.a2++;
    s5.a3++;
    s5.a4++;
    s5.a5++;
    s5.a6++;
    s5.a7++;
    s5.a8++;
    s5.a9++;
    s5.a10++;
  }
  ASSERT_SX (5)
  return 0;
}
sparselu.balance.c
#include "hclib.h"
#include <omp.h>
/* Per-thread count of tasks executed, reported at the end of
 * sparselu_par_call.  Sized for at most 32 OpenMP threads.
 * NOTE(review): the '____' prefix is a reserved identifier pattern;
 * kept as-is because it is part of this file's external interface. */
int ____num_tasks[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
/**********************************************************************************************/
/*  This program is part of the Barcelona OpenMP Tasks Suite                                  */
/*  Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion  */
/*  Copyright (C) 2009 Universitat Politecnica de Catalunya                                   */
/*                                                                                            */
/*  This program is free software; you can redistribute it and/or modify                      */
/*  it under the terms of the GNU General Public License as published by                      */
/*  the Free Software Foundation; either version 2 of the License, or                         */
/*  (at your option) any later version.                                                       */
/*                                                                                            */
/*  This program is distributed in the hope that it will be useful,                           */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of                            */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the                             */
/*  GNU General Public License for more details.                                              */
/*                                                                                            */
/*  You should have received a copy of the GNU General Public License                         */
/*  along with this program; if not, write to the Free Software                               */
/*  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA              */
/**********************************************************************************************/

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include <assert.h> /* added: assert() is used in sparselu_par_call */
#include "bots.h"
#include "sparselu.h"

/***********************************************************************
 * checkmat: compare two blocks element-wise; fail when the relative
 * error exceeds EPSILON (absolute check when the reference entry is 0).
 **********************************************************************/
int checkmat (float *M, float *N)
{
   int i, j;
   float r_err;

   for (i = 0; i < bots_arg_size_1; i++)
   {
      for (j = 0; j < bots_arg_size_1; j++)
      {
         r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j];
         if ( r_err == 0.0 ) continue;

         if (r_err < 0.0 ) r_err = -r_err;

         if ( M[i*bots_arg_size_1+j] == 0 )
         {
            bots_message("Checking failure: A[%d][%d]=%f  B[%d][%d]=%f; \n",
                    i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j]);
            return FALSE;
         }
         r_err = r_err / M[i*bots_arg_size_1+j];
         if(r_err > EPSILON)
         {
            bots_message("Checking failure: A[%d][%d]=%f  B[%d][%d]=%f; Relative Error=%f\n",
                    i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
            return FALSE;
         }
      }
   }
   return TRUE;
}

/***********************************************************************
 * genmat: build the sparse block structure and fill non-null blocks
 * with a deterministic pseudo-random sequence.
 **********************************************************************/
void genmat (float *M[])
{
   int null_entry, init_val, i, j, ii, jj;
   float *p;
   int a=0,b=0;

   init_val = 1325;

   /* generating the structure */
   for (ii=0; ii < bots_arg_size; ii++)
   {
      for (jj=0; jj < bots_arg_size; jj++)
      {
         /* computing null entries */
         null_entry=FALSE;
         if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
         if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
         if (ii%2==1) null_entry = TRUE;
         if (jj%2==1) null_entry = TRUE;
         if (ii==jj) null_entry = FALSE;
         if (ii==jj-1) null_entry = FALSE;
         if (ii-1 == jj) null_entry = FALSE;
         /* allocating matrix */
         if (null_entry == FALSE){
            a++;
            M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
            if ((M[ii*bots_arg_size+jj] == NULL))
            {
               bots_message("Error: Out of memory\n");
               exit(101);
            }
            /* initializing matrix */
            /* Lehmer-style LCG: init_val stays in [0, 65535). */
            p = M[ii*bots_arg_size+jj];
            for (i = 0; i < bots_arg_size_1; i++)
            {
               for (j = 0; j < bots_arg_size_1; j++)
               {
                    init_val = (3125 * init_val) % 65536;
                    (*p) = (float)((init_val - 32768.0) / 16384.0);
                    p++;
               }
            }
         }
         else
         {
            b++;
            M[ii*bots_arg_size+jj] = NULL;
         }
      }
   }
   bots_debug("allo = %d, no = %d, total = %d, factor = %f\n",a,b,a+b,(float)((float)a/(float)(a+b)));
}

/***********************************************************************
 * print_structure: dump the null/non-null block pattern of a matrix.
 **********************************************************************/
void print_structure(char *name, float *M[])
{
   int ii, jj;
   bots_message("Structure for matrix %s @ 0x%p\n",name, M);
   for (ii = 0; ii < bots_arg_size; ii++) {
     for (jj = 0; jj < bots_arg_size; jj++) {
        if (M[ii*bots_arg_size+jj]!=NULL) {bots_message("x");}
        else bots_message(" ");
     }
     bots_message("\n");
   }
   bots_message("\n");
}

/***********************************************************************
 * allocate_clean_block: allocate one zero-filled block (exits on OOM).
 **********************************************************************/
float * allocate_clean_block()
{
  int i,j;
  float *p, *q;

  p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
  q=p;
  if (p!=NULL){
     for (i = 0; i < bots_arg_size_1; i++)
        for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;}
  }
  else
  {
      bots_message("Error: Out of memory\n");
      exit (101);
  }
  return (q);
}

/***********************************************************************
 * lu0: unblocked LU factorization (no pivoting) of the diagonal block.
 **********************************************************************/
void lu0(float *diag)
{
   int i, j, k;

   for (k=0; k<bots_arg_size_1; k++)
      for (i=k+1; i<bots_arg_size_1; i++)
      {
         diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         for (j=k+1; j<bots_arg_size_1; j++)
            diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j];
      }
}

/***********************************************************************
 * bdiv: solve row * U^-1 against the factored diagonal block.
 **********************************************************************/
void bdiv(float *diag, float *row)
{
   int i, j, k;
   for (i=0; i<bots_arg_size_1; i++)
      for (k=0; k<bots_arg_size_1; k++)
      {
         row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         for (j=k+1; j<bots_arg_size_1; j++)
            row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j];
      }
}

/***********************************************************************
 * bmod: inner -= row * col (Schur complement update of one block).
 **********************************************************************/
void bmod(float *row, float *col, float *inner)
{
   int i, j, k;
   for (i=0; i<bots_arg_size_1; i++)
      for (j=0; j<bots_arg_size_1; j++)
         for (k=0; k<bots_arg_size_1; k++)
            inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j];
}

/***********************************************************************
 * fwd: forward elimination of a column block with L of the diagonal.
 **********************************************************************/
void fwd(float *diag, float *col)
{
   int i, j, k;
   for (j=0; j<bots_arg_size_1; j++)
      for (k=0; k<bots_arg_size_1; k++)
         for (i=k+1; i<bots_arg_size_1; i++)
            col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j];
}

/* Allocate the block-pointer array, generate the matrix, print structure. */
void sparselu_init (float ***pBENCH, char *pass)
{
   *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
   genmat(*pBENCH);
   print_structure(pass, *pBENCH);
}

/* Sequential reference LU factorization over the sparse block matrix. */
void sparselu_seq_call(float **BENCH)
{
   int ii, jj, kk;

   for (kk=0; kk<bots_arg_size; kk++)
   {
      lu0(BENCH[kk*bots_arg_size+kk]);
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
         {
            fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
         }
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
         {
            bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
         }
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
               {
                  if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                  bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
               }
   }
}

/* Task-parallel LU factorization; counts tasks per executing thread.
 *
 * Fixes relative to the previous revision:
 *  - Removed the stray ';' that followed every OpenMP pragma.  A pragma
 *    binds to the next statement, so 'parallel', 'single' and all 'task'
 *    constructs were binding to empty statements and the whole
 *    factorization ran serially with zero real tasks -- defeating the
 *    per-thread balance measurement this file exists for.
 *  - Restored the two taskwait barriers of the upstream BOTS sparselu:
 *    bmod tasks read the blocks written by fwd/bdiv tasks, and the next
 *    kk iteration reads blocks written by bmod, so each phase must
 *    complete before the next starts. */
void sparselu_par_call(float **BENCH)
{
   int ii, jj, kk;

   bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
           bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel private(kk)
   {
#pragma omp single
      for (kk=0; kk<bots_arg_size; kk++)
      {
         lu0(BENCH[kk*bots_arg_size+kk]);
         for (jj=kk+1; jj<bots_arg_size; jj++)
            if (BENCH[kk*bots_arg_size+jj] != NULL)
            {
#ifdef HCLIB_TASK_UNTIED
#pragma omp task firstprivate(kk, jj) shared(BENCH) untied
#else
#pragma omp task firstprivate(kk, jj) shared(BENCH)
#endif
               {
                  ____num_tasks[omp_get_thread_num()]++;
                  fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
               }
            }
         for (ii=kk+1; ii<bots_arg_size; ii++)
            if (BENCH[ii*bots_arg_size+kk] != NULL)
            {
#ifdef HCLIB_TASK_UNTIED
#pragma omp task firstprivate(kk, ii) shared(BENCH) untied
#else
#pragma omp task firstprivate(kk, ii) shared(BENCH)
#endif
               {
                  ____num_tasks[omp_get_thread_num()]++;
                  bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
               }
            }
         /* All fwd/bdiv results must be ready before bmod reads them. */
#pragma omp taskwait
         for (ii=kk+1; ii<bots_arg_size; ii++)
            if (BENCH[ii*bots_arg_size+kk] != NULL)
               for (jj=kk+1; jj<bots_arg_size; jj++)
                  if (BENCH[kk*bots_arg_size+jj] != NULL)
                  {
#ifdef HCLIB_TASK_UNTIED
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH) untied
#else
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH)
#endif
                     {
                        ____num_tasks[omp_get_thread_num()]++;
                        if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                        bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
                     }
                  }
         /* bmod must finish before the next kk iteration consumes it. */
#pragma omp taskwait
      }
   }
   bots_message(" completed!\n");
   {
      int __i;
      assert(omp_get_max_threads() <= 32);
      for (__i = 0; __i < omp_get_max_threads(); __i++) {
         fprintf(stderr, "Thread %d: %d\n", __i, ____num_tasks[__i]);
      }
   }
}

/* Print the final block structure after factorization. */
void sparselu_fini (float **BENCH, char *pass)
{
   print_structure(pass, BENCH);
}

/* Compare the sequential and parallel results block by block. */
int sparselu_check(float **SEQ, float **BENCH)
{
   int ii,jj,ok=1;

   for (ii=0; ((ii<bots_arg_size) && ok); ii++)
   {
      for (jj=0; ((jj<bots_arg_size) && ok); jj++)
      {
         if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE;
         if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE;
         if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL))
            ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
      }
   }
   if (ok) return BOTS_RESULT_SUCCESSFUL;
   else return BOTS_RESULT_UNSUCCESSFUL;
}
opt2.c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"

#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6f\n",(_m), stamp);

// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
int CUTOFF;

#define T int

/* Provided elsewhere: the sequential base-case implementations. */
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);

/* Task-recursive merge of two sorted n-element arrays into
 * result[start .. start+length).  Recursion stops (and the tasks become
 * included) below MIN_MERGE_SIZE*2 or once inside a final task. */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length,int i) {
        if (length < MIN_MERGE_SIZE*2L || omp_in_final()) {
                // Base case
                basicmerge(n, left, right, result, start, length);
        } else {
                // Recursive decomposition
                #pragma omp task final(i == CUTOFF) depend(in: left[0], right[0])
                merge(n, left, right, result, start, length/2,i+1);
                #pragma omp task final(i == CUTOFF) depend(in: left[length/2], right[length/2])
                merge(n, left, right, result, start + length/2, length/2,i+1);
                #pragma omp taskwait
        }
}

/* Task-recursive multisort: sort four quarters, merge quarters into
 * halves in tmp, then merge halves back into data. */
void multisort(long n, T data[n], T tmp[n],int i) {
        if (n >= MIN_SORT_SIZE*4L && !omp_in_final()) {
                // Recursive decomposition
                #pragma omp task final(i == CUTOFF) depend(out: data[0])
                multisort(n/4L, &data[0], &tmp[0],i+1);
                #pragma omp task final(i == CUTOFF) depend(out: data[n/4L])
                multisort(n/4L, &data[n/4L], &tmp[n/4L],i+1);
                #pragma omp task final(i == CUTOFF) depend(out: data[n/2L])
                multisort(n/4L, &data[n/2L], &tmp[n/2L],i+1);
                #pragma omp task final(i == CUTOFF) depend(out: data[3L*n/4L])
                multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L],i+1);
                #pragma omp taskwait

                #pragma omp task final(i == CUTOFF) depend(in: data[0], data[n/4L]) depend(out: tmp[0])
                merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,i+1);
                /* Fixed: the depend clause below used the GLOBAL 'N'
                 * (depend(in: data[N/2L])) instead of the local 'n', so at
                 * every recursion level the task declared a dependence on
                 * the wrong element.  All sibling clauses use 'n'. */
                #pragma omp task final(i == CUTOFF) depend(in: data[n/2L], data[3L*n/4L]) depend(out: tmp[n/2L])
                merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,i+1);
                #pragma omp task final(i == CUTOFF) depend(in: tmp[0], tmp[n/2L])
                merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,i+1);
                #pragma omp taskwait
        } else {
                // Base case
                basicsort(n, data);
        }
}

/* Fill data with a deterministic pseudo-random sequence (parallel via
 * taskloop).  NOTE(review): grainsize(length/omp_get_max_threads()) is 0
 * when length < thread count -- confirm inputs are always large enough. */
static void initialize(long length, T data[length]) {
        #pragma omp taskloop grainsize(length/omp_get_max_threads())
        for (long i = 0; i < length; i++) {
                if (i==0) {
                        data[i] = rand();
                } else {
                        data[i] = ((data[i-1]+1) * i * 104723L) % N;
                }
        }
}

/* Zero-fill the scratch buffer. */
static void clear(long length, T data[length]) {
        #pragma omp taskloop grainsize(length/omp_get_max_threads())
        for (long i = 0; i < length; i++) {
                data[i] = 0;
        }
}

/* Verify the result is non-decreasing; report the number of inversions. */
void check_sorted(long n, T data[n]) {
        int unsorted=0;
        for (int i=1; i<n; i++)
                if (data[i-1] > data[i]) unsorted++;
        if (unsorted > 0)
                printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
}

int main(int argc, char **argv) {
        /* Defaults for command line arguments */
        /* Important: all of them should be powers of two */
        N = 32768 * 1024;
        MIN_SORT_SIZE = 1024;
        MIN_MERGE_SIZE = 1024;
        CUTOFF = 4;

        /* Process command-line arguments */
        for (int i=1; i<argc; i++) {
                if (strcmp(argv[i], "-n")==0) {
                        N = atol(argv[++i]) * 1024;
                }
                else if (strcmp(argv[i], "-s")==0) {
                        MIN_SORT_SIZE = atol(argv[++i]);
                }
                else if (strcmp(argv[i], "-m")==0) {
                        MIN_MERGE_SIZE = atol(argv[++i]);
                }
#ifdef _OPENMP
                else if (strcmp(argv[i], "-c")==0) {
                        CUTOFF = atoi(argv[++i]);
                }
#endif
                else {
#ifdef _OPENMP
                        fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE] -c CUTOFF\n", argv[0]);
#else
                        fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE]\n", argv[0]);
#endif
                        fprintf(stderr, "       -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
                        fprintf(stderr, "       -s to specify the size of the vector (in elements) that breaks recursion in the sort phase (default 1024)\n");
                        fprintf(stderr, "       -m to specify the size of the vector (in elements) that breaks recursion in the merge phase (default 1024)\n");
#ifdef _OPENMP
                        /* Fixed: this message claimed "default 16" while the code sets CUTOFF = 4. */
                        fprintf(stderr, "       -c to specify the cut off recursion level to stop task generation in OpenMP (default 4)\n");
#endif
                        return EXIT_FAILURE;
                }
        }

        fprintf(stdout, "*****************************************************************************************\n");
        /* NOTE(review): N/1024 is printed here, i.e. Kelements, despite the
         * "(in number of elements)" label -- confirm intended units. */
        fprintf(stdout, "Problem size (in number of elements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/1024, MIN_SORT_SIZE, MIN_MERGE_SIZE);
#ifdef _OPENMP
        fprintf(stdout, "Cut-off level: CUTOFF=%d\n", CUTOFF);
        fprintf(stdout, "Number of threads in OpenMP: OMP_NUM_THREADS=%d\n", omp_get_max_threads());
#endif
        fprintf(stdout, "*****************************************************************************************\n");

        T *data = malloc(N*sizeof(T));
        T *tmp = malloc(N*sizeof(T));

        double stamp;
        START_COUNT_TIME;

        #pragma omp parallel
        #pragma omp single
        initialize(N, data);

        #pragma omp parallel
        #pragma omp single
        clear(N, tmp);

        STOP_COUNT_TIME("Initialization time in seconds");

        START_COUNT_TIME;

        #pragma omp parallel
        #pragma omp single
        multisort(N, data, tmp,0);

        STOP_COUNT_TIME("Multisort execution time");

        START_COUNT_TIME;
        check_sorted (N, data);
        STOP_COUNT_TIME("Check sorted data execution time");

        fprintf(stdout, "Multisort program finished\n");
        fprintf(stdout, "*****************************************************************************************\n");
        return 0;
}
cover-test.c
/*
 * Copyright © 2015 RISC OS Open Ltd
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of the copyright holders not be used in
 * advertising or publicity pertaining to distribution of the software without
 * specific, written prior permission. The copyright holders make no
 * representations about the suitability of this software for any purpose. It
 * is provided "as is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
 * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
 * SOFTWARE.
 *
 * Author: Ben Avison (bavison@riscosopen.org)
 */

/*
 * This test aims to verify both numerical correctness and the honouring of
 * array bounds for scaled plots (both nearest-neighbour and bilinear) at or
 * close to the boundary conditions for applicability of "cover" type fast paths
 * and iter fetch routines.
 *
 * It has a secondary purpose: by setting the env var EXACT (to any value) it
 * will only test plots that are exactly on the boundary condition. This makes
 * it possible to ensure that "cover" routines are being used to the maximum,
 * although this requires the use of a debugger or code instrumentation to
 * verify.
 */

#include "utils.h"
#include <stdlib.h>
#include <stdio.h>

/* Approximate limits for random scale factor generation - these ensure we can
 * get at least 8x reduction and 8x enlargement.
 */
#define LOG2_MAX_FACTOR (3)

/* 1/sqrt(2) (or sqrt(0.5), or 2^-0.5) as a 0.32 fixed-point number */
#define INV_SQRT_2_0POINT32_FIXED (0xB504F334u)

/* The largest increment that can be generated by random_scale_factor().
 * This occurs when the "mantissa" part is 0xFFFFFFFF and the "exponent"
 * part is -LOG2_MAX_FACTOR.
 */
#define MAX_INC ((pixman_fixed_t) \
    (INV_SQRT_2_0POINT32_FIXED >> (31 - 16 - LOG2_MAX_FACTOR)))

/* Minimum source width (in pixels) based on a typical page size of 4K and
 * maximum colour depth of 32bpp.
 */
#define MIN_SRC_WIDTH (4096 / 4)

/* Derive the destination width so that at max increment we fit within source */
#define DST_WIDTH (MIN_SRC_WIDTH * pixman_fixed_1 / MAX_INC)

/* Calculate heights the other way round.
 * No limits due to page alignment here.
 */
#define DST_HEIGHT 3
#define SRC_HEIGHT ((DST_HEIGHT * MAX_INC + pixman_fixed_1 - 1) / pixman_fixed_1)

/* At the time of writing, all the scaled fast paths use SRC, OVER or ADD
 * Porter-Duff operators. XOR is included in the list to ensure good
 * representation of iter scanline fetch routines.
 */
static const pixman_op_t op_list[] = {
    PIXMAN_OP_SRC,
    PIXMAN_OP_OVER,
    PIXMAN_OP_ADD,
    PIXMAN_OP_XOR,
};

/* At the time of writing, all the scaled fast paths use a8r8g8b8, x8r8g8b8
 * or r5g6b5, or red-blue swapped versions of the same. When a mask channel is
 * used, it is always a8 (and so implicitly not component alpha). a1r5g5b5 is
 * included because it is the only other format to feature in any iters.
 */
static const pixman_format_code_t img_fmt_list[] = {
    PIXMAN_a8r8g8b8,
    PIXMAN_x8r8g8b8,
    PIXMAN_r5g6b5,
    PIXMAN_a1r5g5b5
};

/* This is a flag reflecting the environment variable EXACT. It can be used
 * to ensure that source coordinates corresponding exactly to the "cover" limits
 * are used, rather than any "near misses". This can, for example, be used in
 * conjunction with a debugger to ensure that only COVER fast paths are used.
 */
static int exact;

/* Build a source image of format fmt: a fenced image (guard pages at the
 * edges so out-of-bounds reads fault) whose left-most and right-most
 * MIN_SRC_WIDTH columns hold deterministic pseudo-random pixel data.
 * Returns NULL on allocation failure.
 */
static pixman_image_t *
create_src_image (pixman_format_code_t fmt)
{
    pixman_image_t *tmp_img, *img;

    /* We need the left-most and right-most MIN_SRC_WIDTH pixels to have
     * predictable values, even though fence_image_create_bits() may allocate
     * an image somewhat larger than that, by an amount that varies depending
     * upon the page size on the current platform. The solution is to create a
     * temporary non-fenced image that is exactly MIN_SRC_WIDTH wide and blit it
     * into the fenced image.
     */
    tmp_img = pixman_image_create_bits (fmt, MIN_SRC_WIDTH, SRC_HEIGHT,
                                        NULL, 0);
    if (tmp_img == NULL)
        return NULL;

    img = fence_image_create_bits (fmt, MIN_SRC_WIDTH, SRC_HEIGHT, TRUE);
    if (img == NULL)
    {
        pixman_image_unref (tmp_img);
        return NULL;
    }

    prng_randmemset (tmp_img->bits.bits,
                     tmp_img->bits.rowstride * SRC_HEIGHT * sizeof (uint32_t),
                     0);
    image_endian_swap (tmp_img);

    /* Copy the random data to both the left and the right edge of the
     * (possibly wider) fenced image.
     */
    pixman_image_composite (PIXMAN_OP_SRC, tmp_img, NULL, img,
                            0, 0, 0, 0, 0, 0, MIN_SRC_WIDTH, SRC_HEIGHT);
    pixman_image_composite (PIXMAN_OP_SRC, tmp_img, NULL, img,
                            0, 0, 0, 0, img->bits.width - MIN_SRC_WIDTH, 0,
                            MIN_SRC_WIDTH, SRC_HEIGHT);
    pixman_image_unref (tmp_img);

    return img;
}

/* Return a random 16.16 fixed-point scale increment, log-uniformly spread
 * over roughly [2^-LOG2_MAX_FACTOR, 2^+LOG2_MAX_FACTOR] around 1.0.
 */
static pixman_fixed_t
random_scale_factor(void)
{
    /* Get a random number with top bit set. */
    uint32_t f = prng_rand () | 0x80000000u;

    /* In log(2) space, this is still approximately evenly spread between 31
     * and 32. Divide by sqrt(2) to centre the distribution on 2^31.
     */
    f = ((uint64_t) f * INV_SQRT_2_0POINT32_FIXED) >> 32;

    /* Now shift right (ie divide by an integer power of 2) to spread the
     * distribution between centres at 2^(16 +/- LOG2_MAX_FACTOR).
     */
    f >>= 31 - 16 + prng_rand_n (2 * LOG2_MAX_FACTOR + 1) - LOG2_MAX_FACTOR;
    return f;
}

/* Compute the translation coefficient so that, under scale factor `scale`,
 * a chosen destination reference point maps exactly onto the "cover"
 * boundary reference point in source space (pixman_fixed_e / half-a-pixel
 * for nearest/bilinear respectively, at the low or high edge per low_align).
 */
static pixman_fixed_t
calc_translate (int            dst_size,
                int            src_size,
                pixman_fixed_t scale,
                pixman_bool_t  low_align,
                pixman_bool_t  bilinear)
{
    pixman_fixed_t ref_src, ref_dst, scaled_dst;

    if (low_align)
    {
        ref_src = bilinear ? pixman_fixed_1 / 2 : pixman_fixed_e;
        ref_dst = pixman_fixed_1 / 2;
    }
    else
    {
        ref_src = pixman_int_to_fixed (src_size) -
                  bilinear * pixman_fixed_1 / 2;
        ref_dst = pixman_int_to_fixed (dst_size) - pixman_fixed_1 / 2;
    }

    scaled_dst = ((uint64_t) ref_dst * scale + pixman_fixed_1 / 2) /
                 pixman_fixed_1;

    /* We need the translation to be set such that when ref_dst is fed through
     * the transformation matrix, we get ref_src as the result.
     */
    return ref_src - scaled_dst;
}

/* Return a small random perturbation (possibly 0) to nudge the transform
 * slightly off the exact cover boundary; used only when !exact.
 */
static pixman_fixed_t
random_offset (void)
{
    pixman_fixed_t offset = 0;

    /* Ensure we test the exact case quite a lot */
    if (prng_rand_n (2))
        return offset;

    /* What happens when we are close to the edge of the first
     * interpolation step?
     */
    if (prng_rand_n (2))
        offset += (pixman_fixed_1 >> BILINEAR_INTERPOLATION_BITS) - 16;

    /* Try fine-grained variations */
    offset += prng_rand_n (32);

    /* Test in both directions */
    if (prng_rand_n (2))
        offset = -offset;

    return offset;
}

/* Verify (via assert) that the centres of the first and last destination
 * pixels map inside the source's cover limits for the given filter.
 * NOTE(review): the pixman_transform_point() calls live inside assert(),
 * so this whole check compiles away under NDEBUG - including the transform
 * evaluation itself; that is acceptable here only because the results are
 * used for nothing but the asserts.
 */
static void
check_transform (pixman_image_t     *dst_img,
                 pixman_image_t     *src_img,
                 pixman_transform_t *transform,
                 pixman_bool_t       bilinear)
{
    pixman_vector_t v1, v2;

    /* Centre of the top-left destination pixel */
    v1.vector[0] = pixman_fixed_1 / 2;
    v1.vector[1] = pixman_fixed_1 / 2;
    v1.vector[2] = pixman_fixed_1;
    assert (pixman_transform_point (transform, &v1));

    /* Centre of the bottom-right destination pixel */
    v2.vector[0] = pixman_int_to_fixed (dst_img->bits.width) -
                   pixman_fixed_1 / 2;
    v2.vector[1] = pixman_int_to_fixed (dst_img->bits.height) -
                   pixman_fixed_1 / 2;
    v2.vector[2] = pixman_fixed_1;
    assert (pixman_transform_point (transform, &v2));

    if (bilinear)
    {
        assert (v1.vector[0] >= pixman_fixed_1 / 2);
        assert (v1.vector[1] >= pixman_fixed_1 / 2);
        assert (v2.vector[0] <= pixman_int_to_fixed (src_img->bits.width) -
                                pixman_fixed_1 / 2);
        assert (v2.vector[1] <= pixman_int_to_fixed (src_img->bits.height) -
                                pixman_fixed_1 / 2);
    }
    else
    {
        assert (v1.vector[0] >= pixman_fixed_e);
        assert (v1.vector[1] >= pixman_fixed_e);
        assert (v2.vector[0] <= pixman_int_to_fixed (src_img->bits.width));
        assert (v2.vector[1] <= pixman_int_to_fixed (src_img->bits.height));
    }
}

/* One fuzzer iteration: build a randomly scaled/aligned composite at (or,
 * when !exact, near) the cover boundary and return the CRC32 of the result.
 * All randomness is seeded from testnum, so each test is reproducible.
 */
static uint32_t
test_cover (int testnum, int verbose)
{
    pixman_fixed_t         x_scale, y_scale;
    pixman_bool_t          left_align, top_align;
    pixman_bool_t          bilinear;
    pixman_filter_t        filter;
    pixman_op_t            op;
    size_t                 src_fmt_index;
    pixman_format_code_t   src_fmt, dst_fmt, mask_fmt;
    pixman_image_t        *src_img, *dst_img, *mask_img;
    pixman_transform_t     src_transform, mask_transform;
    pixman_fixed_t         fuzz[4];
    uint32_t               crc32;

    /* We allocate one fenced image for each pixel format up-front. This is to
     * avoid spending a lot of time on memory management rather than on testing
     * Pixman optimisations. We need one per thread because the transformation
     * matrices and filtering are properties of the source and mask images.
     */
    static pixman_image_t *src_imgs[ARRAY_LENGTH (img_fmt_list)];
    static pixman_image_t *mask_bits_img;
    static pixman_bool_t   fence_images_created;
#ifdef USE_OPENMP
#pragma omp threadprivate (src_imgs)
#pragma omp threadprivate (mask_bits_img)
#pragma omp threadprivate (fence_images_created)
#endif

    if (!fence_images_created)
    {
        int i;

        /* Fixed seed: the shared source/mask pixel data must be identical
         * across runs and threads for the golden checksums to hold. */
        prng_srand (0);
        for (i = 0; i < ARRAY_LENGTH (img_fmt_list); i++)
            src_imgs[i] = create_src_image (img_fmt_list[i]);
        mask_bits_img = create_src_image (PIXMAN_a8);
        fence_images_created = TRUE;
    }

    prng_srand (testnum);
    x_scale = random_scale_factor ();
    y_scale = random_scale_factor ();
    left_align = prng_rand_n (2);
    top_align = prng_rand_n (2);
    bilinear = prng_rand_n (2);
    filter = bilinear ? PIXMAN_FILTER_BILINEAR : PIXMAN_FILTER_NEAREST;

    op = op_list[prng_rand_n (ARRAY_LENGTH (op_list))];

    dst_fmt = img_fmt_list[prng_rand_n (ARRAY_LENGTH (img_fmt_list))];
    dst_img = pixman_image_create_bits (dst_fmt, DST_WIDTH, DST_HEIGHT,
                                        NULL, 0);
    prng_randmemset (dst_img->bits.bits,
                     dst_img->bits.rowstride * DST_HEIGHT * sizeof (uint32_t),
                     0);
    image_endian_swap (dst_img);

    src_fmt_index = prng_rand_n (ARRAY_LENGTH (img_fmt_list));
    src_fmt = img_fmt_list[src_fmt_index];
    src_img = src_imgs[src_fmt_index];
    pixman_image_set_filter (src_img, filter, NULL, 0);
    pixman_transform_init_scale (&src_transform, x_scale, y_scale);
    src_transform.matrix[0][2] = calc_translate (dst_img->bits.width,
                                                 src_img->bits.width,
                                                 x_scale, left_align,
                                                 bilinear);
    src_transform.matrix[1][2] = calc_translate (dst_img->bits.height,
                                                 src_img->bits.height,
                                                 y_scale, top_align,
                                                 bilinear);

    if (prng_rand_n (2))
    {
        /* No mask */
        mask_fmt = PIXMAN_null;
        mask_img = NULL;
    }
    else if (prng_rand_n (2))
    {
        /* a8 bitmap mask */
        mask_fmt = PIXMAN_a8;
        mask_img = mask_bits_img;
        pixman_image_set_filter (mask_img, filter, NULL, 0);
        pixman_transform_init_scale (&mask_transform, x_scale, y_scale);
        mask_transform.matrix[0][2] = calc_translate (dst_img->bits.width,
                                                      mask_img->bits.width,
                                                      x_scale, left_align,
                                                      bilinear);
        mask_transform.matrix[1][2] = calc_translate (dst_img->bits.height,
                                                      mask_img->bits.height,
                                                      y_scale, top_align,
                                                      bilinear);
    }
    else
    {
        /* Solid mask */
        pixman_color_t color;
        memset (&color, 0xAA, sizeof color);
        mask_fmt = PIXMAN_solid;
        mask_img = pixman_image_create_solid_fill (&color);
    }

    if (!exact)
    {
        int i = 0;

        /* NOTE(review): mask_transform is only initialised in the a8-mask
         * branch above, yet fuzz[2]/fuzz[3] are added to it unconditionally
         * here - a += on an indeterminate value in the no-mask/solid-mask
         * cases. The value is never read on those paths (only applied when
         * mask_fmt == PIXMAN_a8 below), but strictly it should be guarded
         * or zero-initialised - TODO confirm/fix upstream.
         * Note also that all four random_offset() calls must always be made
         * so the PRNG stream (and hence the golden checksums) stays fixed.
         */
        while (i < 4)
            fuzz[i++] = random_offset ();
        src_transform.matrix[0][2] += fuzz[0];
        src_transform.matrix[1][2] += fuzz[1];
        mask_transform.matrix[0][2] += fuzz[2];
        mask_transform.matrix[1][2] += fuzz[3];
    }

    pixman_image_set_transform (src_img, &src_transform);
    if (mask_fmt == PIXMAN_a8)
        pixman_image_set_transform (mask_img, &mask_transform);

    if (verbose)
    {
        printf ("op=%s\n", operator_name (op));
        printf ("src_fmt=%s, dst_fmt=%s, mask_fmt=%s\n",
                format_name (src_fmt), format_name (dst_fmt),
                format_name (mask_fmt));
        printf ("x_scale=0x%08X, y_scale=0x%08X, align %s/%s, %s\n",
                x_scale, y_scale,
                left_align ? "left" : "right",
                top_align ? "top" : "bottom",
                bilinear ? "bilinear" : "nearest");
        if (!exact)
        {
            int i = 0;

            printf ("fuzz factors");
            while (i < 4)
                printf (" %d", fuzz[i++]);
            printf ("\n");
        }
    }

    if (exact)
    {
        check_transform (dst_img, src_img, &src_transform, bilinear);
        if (mask_fmt == PIXMAN_a8)
            check_transform (dst_img, mask_img, &mask_transform, bilinear);
    }

    pixman_image_composite (op, src_img, mask_img, dst_img,
                            0, 0, 0, 0, 0, 0,
                            dst_img->bits.width, dst_img->bits.height);

    if (verbose)
        print_image (dst_img);

    crc32 = compute_crc32_for_image (0, dst_img);

    pixman_image_unref (dst_img);
    if (mask_fmt == PIXMAN_solid)
        pixman_image_unref (mask_img);

    return crc32;
}

/* Golden CRC32 checksums depend on the bilinear interpolation precision
 * Pixman was built with; unknown precisions get 0 (the run will then report
 * a mismatch rather than silently passing).
 */
#if BILINEAR_INTERPOLATION_BITS == 7
#define CHECKSUM_FUZZ 0x6B56F607
#define CHECKSUM_EXACT 0xA669F4A3
#elif BILINEAR_INTERPOLATION_BITS == 4
#define CHECKSUM_FUZZ 0x83119ED0
#define CHECKSUM_EXACT 0x0D3382CD
#else
#define CHECKSUM_FUZZ 0x00000000
#define CHECKSUM_EXACT 0x00000000
#endif

int
main (int argc, const char *argv[])
{
    unsigned long page_size;

    /* Skip the test (automake exit code 77) if fenced images are
     * unsupported or the page size is unhelpfully large for MIN_SRC_WIDTH.
     */
    page_size = fence_get_page_size ();
    if (page_size == 0 || page_size > 16 * 1024)
        return 77; /* automake SKIP */

    exact = getenv ("EXACT") != NULL;
    if (exact)
        printf ("Doing plots that are exactly aligned to boundaries\n");

    return fuzzer_test_main ("cover", 2000000,
                             exact ? CHECKSUM_EXACT : CHECKSUM_FUZZ,
                             test_cover, argc, argv);
}
neighbor.h
#pragma once

/* A (rank-)bitmask-carrying record of one particle pair that straddles a
 * process-domain boundary: the local particle (id_in), the remote particle
 * (id_out), the cluster id both currently belong to, and a bitset over MPI
 * ranks (rank_list) marking every rank that participates in the cluster.
 * Instances are serialised to flat PS::S32 buffers for MPI exchange via
 * output()/input().
 */
class ExPair{
public:
    PS::S32 id_in;        // id of the particle on this rank
    PS::S32 id_out;       // id of the partner particle on the other rank
    PS::S32 id_cluster;   // current (minimum-id) cluster label of the pair
    PS::S32 * rank_list;  // bitset over ranks, 'size' words of n_bit bits each

    static PS::S32 size;  // number of PS::S32 words needed for n_proc bits
    static PS::S32 rem;   // unused trailing bits in the last word
    static PS::S32 n_bit; // bits per word (8 * sizeof(PS::S32))

    // Must be called once (NeighborList's constructor does) before any
    // ExPair is constructed, since constructors read 'size'.
    static void initialize() {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        n_bit = 8 * sizeof(PS::S32);
        size = (PS::S32)std::ceil((PS::F64)n_proc/n_bit);
        rem = n_bit*size - n_proc;
    }
    // Serialised length in PS::S32 units: 3 header ints + bitset words.
    static PS::S32 getSize() { return size+3; }

    // NOTE(review): rank_list is a raw new[] array; copy ctor / assignment /
    // dtor are implemented by hand below (rule of three) - correct, but a
    // std::vector<PS::S32> would remove the manual memory management.
    ExPair(){
        //PS::S32 myrank = PS::Comm::getRank();
        id_in = id_out = id_cluster = 0;
        rank_list = new PS::S32[size];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0;
        //setFlag(myrank);
    }
    ExPair(PS::S32 id_in0, PS::S32 id_out0, PS::S32 id_cluster0){
        //PS::S32 myrank = PS::Comm::getRank();
        id_in = id_in0;
        id_out = id_out0;
        id_cluster = id_cluster0;
        rank_list = new PS::S32[size];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0;
        //setFlag(myrank);
    }
    ExPair(const ExPair & ep){
        id_in = ep.id_in;
        id_out = ep.id_out;
        id_cluster = ep.id_cluster;
        rank_list = new PS::S32[size];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = ep.rank_list[i];
    }
    ExPair &operator=(const ExPair & ep){
        if ( this != &ep ){
            id_in = ep.id_in;
            id_out = ep.id_out;
            id_cluster = ep.id_cluster;
            for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] = ep.rank_list[i];
        }
        return *this;
    }
    ~ExPair(){
        delete [] rank_list;
    }

    PS::S32 getId() const { return id_in; }
    std::pair<PS::S32,PS::S32> getPair() const { return std::make_pair(id_in, id_out); }
    PS::S32 getIdCluster() const { return id_cluster; }
    PS::S32 setIdCluster(PS::S32 id_cluster0) { return id_cluster = id_cluster0; }

    // Deserialise from a flat buffer written by the PEER rank's output():
    // note id_in/id_out are swapped on purpose, converting the pair to this
    // rank's perspective. Returns the number of ints consumed.
    PS::S32 input(PS::S32 * inp){
        id_in  = inp[1];
        id_out = inp[0];
        id_cluster = inp[2];
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = inp[i+3];
        return size+3;
    }
    // Serialise into a flat buffer; returns the number of ints written.
    PS::S32 output(PS::S32 * outp){
        outp[0] = id_in;
        outp[1] = id_out;
        outp[2] = id_cluster;
        for ( PS::S32 i=0; i<size; i++ ) outp[i+3] = rank_list[i];
        return size+3;
    }

    // Bitset accessors: bit i <=> MPI rank i participates in the cluster.
    bool checkFlag(const PS::S32 i) const {
        PS::S32 n = i / n_bit;
        PS::S32 ii = i - n_bit * n;
        return rank_list[n] & (1<<ii);
    }
    void setFlag(const PS::S32 i) {
        PS::S32 n = i / n_bit;
        PS::S32 ii = i - n_bit * n;
        rank_list[n] |= (1<<ii);
    }
    void unsetFlag(const PS::S32 i) {
        PS::S32 n = i / n_bit;
        PS::S32 ii = i - n_bit * n;
        rank_list[n] &= ~(1<<ii);
    }
    void resetFlag() {
        for ( PS::S32 i=0; i<size; i++ ) rank_list[i] = 0;
    }
    bool equalFlag(const ExPair & ep) const {
        bool check = true;
        for ( PS::S32 i=0; i<size; i++ ) check &= (rank_list[i]==ep.rank_list[i]);
        return check;
    }
    // Lowest rank participating in the cluster (n_proc if bitset is empty).
    PS::S32 getMinFlag() const {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        for (PS::S32 i=0; i<n_proc; i++) if ( checkFlag(i) ) return i;
        return n_proc;
    }
    void operator &= (const ExPair & ep) {
        for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] &= ep.rank_list[i];
    }
    void operator |= (const ExPair & ep) {
        for ( PS::S32 i=0; i<size; i++ ) this->rank_list[i] |= ep.rank_list[i];
    }
    // Merge another pair's cluster info into this one: take the smaller
    // cluster id and the union of the rank bitsets. Returns true iff
    // anything changed (used as the fixed-point-iteration convergence flag).
    bool exchange(const ExPair & ep) {
        bool check = (this->id_cluster != ep.id_cluster);
        this->id_cluster = std::min(this->id_cluster, ep.id_cluster);
        for ( PS::S32 i=0; i<size; i++ ) {
            check |= (this->rank_list[i] != ep.rank_list[i]);
            this->rank_list[i] |= ep.rank_list[i];
        }
        return check;
    }
    // Debug dump: rank, ids, cluster id and the rank bitset as 0/1 digits.
    void show(){
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        std::cout << PS::Comm::getRank() << "\t" << id_in << "\t" << id_out << "\t" << id_cluster << "\t";
        for ( PS::S32 i=0; i<n_proc; i++ ) std::cout << (checkFlag(i));
        std::cout << std::endl;
    }
};

// Definitions for ExPair's static members (this header is presumably
// included by a single translation unit - TODO confirm, otherwise these
// would violate the one-definition rule).
PS::S32 ExPair::size;
PS::S32 ExPair::rem;
PS::S32 ExPair::n_bit;


/* Per-rank bookkeeping for neighbour search and cluster construction:
 * local neighbour lists, the id->index map, locally connected pairs, and
 * all the state needed to negotiate cross-rank ("ex") clusters over MPI.
 */
class NeighborList{
public:
    std::vector<std::vector<PS::S32> > n_list;           // per-particle neighbour ids
    std::map<PS::S32, PS::S32> id_map;                   // particle id -> local index
    std::vector<PS::S32> with_neighbor_list;             // local indices with >=1 neighbour
    std::vector<std::pair<PS::S32, PS::S32> > pair_list; // in-domain neighbour pairs (i<j)
    std::vector<std::pair<PS::S32,PS::S32> > ex_list;    // (local id, remote id) cross-rank pairs
    std::vector<std::pair<PS::S32,PS::S32> > ex_adr_list;// (rank, slot) address of each ex pair
    std::vector<PS::S32> connected_list;                 // ranks we share at least one pair with
    std::vector<std::vector<ExPair> > ex_data;           // per-rank ExPair records
    std::map<std::pair<PS::S32,PS::S32>, std::pair<PS::S32, PS::S32> > ex_data_map; // pair -> (rank, slot)
    std::vector<std::vector<PS::S32> > recv_list;        // per-rank cluster ids we will receive
    std::vector<std::vector<PS::S32> > send_list;        // per-rank cluster ids we will send
    std::vector<PS::S32> recv_rank_list;                 // ranks with non-empty recv_list
    std::vector<PS::S32> send_rank_list;                 // ranks with non-empty send_list

    std::vector<PS::S32> & operator[](PS::S32 i){ return n_list[i]; }

    NeighborList() {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        n_list.clear();
        id_map.clear();
        with_neighbor_list.clear();
        pair_list.clear();
        ex_list.clear();
        ex_adr_list.clear();
        connected_list.clear();
        ex_data_map.clear();
        recv_rank_list.clear();
        send_rank_list.clear();

        ex_data.resize(n_proc);
        recv_list.resize(n_proc);
        send_list.resize(n_proc);
#pragma omp parallel for
        for (PS::S32 i=0; i<n_proc; i++){
            ex_data[i].clear();
            recv_list[i].clear();
            send_list[i].clear();
        }

        // Sets ExPair's static sizing before any ExPair is created.
        ExPair::initialize();
    }

    // Reset all per-step state (id_map is deliberately kept; it is rebuilt
    // separately by makeIdMap) and size n_list to the local particle count.
    template <class Tpsys>
    void initializeList(Tpsys & pp) {
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        n_list.clear();
        //id_map.clear();
        with_neighbor_list.clear();
        pair_list.clear();
        ex_list.clear();
        ex_adr_list.clear();
        connected_list.clear();
        ex_data_map.clear();
        recv_rank_list.clear();
        send_rank_list.clear();
#pragma omp parallel for
        for ( PS::S32 i=0; i<n_proc; i++ ){
            ex_data[i].clear();
            recv_list[i].clear();
            send_list[i].clear();
        }

        n_list.resize(n_loc);
#pragma omp parallel for
        for(PS::S32 i=0; i<n_loc; i++) n_list.at(i).clear();
    }

    ExPair & getExData(std::pair<PS::S32, PS::S32> adr) {
        return ex_data[adr.first][adr.second];
    }

    PS::S32 getNumberOfParticlesWithNeighbor() const { return with_neighbor_list.size(); }
    PS::S32 getNumberOfNeighborPairsLocal() const { return pair_list.size(); }
    PS::S32 getNumberOfRankSend() const { return send_rank_list.size(); }
    PS::S32 getNumberOfRankRecv() const { return recv_rank_list.size(); }
    PS::S32 getNumberOfRankConnected() const { return connected_list.size(); }
    PS::S32 getNumberOfPairConnected(const PS::S32 ii) const { return ex_data[connected_list.at(ii)].size(); }

    // Register particle j (id j_id, owned by j_rank) as a neighbour of local
    // particle i. Cross-rank neighbours create an ExPair and mark the
    // particle as leaving the domain; in-domain pairs are recorded once
    // (i < j_id_local). Safe to call from OpenMP threads (shared containers
    // are guarded by 'omp critical').
    template <class Tpsys>
    void addNeighbor(Tpsys & pp,
                     PS::S32 i,
                     PS::S32 j_id,
                     PS::S32 j_rank,
                     PS::S32 j_id_local=-1) {
        n_list[i].push_back(j_id);
        pp[i].neighbor ++;
        pp[i].id_cluster = std::min(pp[i].id_cluster, j_id);

        if ( j_rank != pp[i].myrank ) {
#pragma omp critical
            {
                ex_list.push_back(std::make_pair(pp[i].id, j_id));
                ex_adr_list.push_back(std::make_pair(j_rank, ex_data.at(j_rank).size()));
                ex_data_map[std::make_pair(pp[i].id, j_id)] = std::make_pair(j_rank, ex_data.at(j_rank).size());
                ExPair ex_pair(pp[i].id, j_id, pp[i].id_cluster);
                ex_pair.setFlag(pp[i].myrank);
                ex_pair.setFlag(j_rank);
                ex_data.at(j_rank).push_back(ex_pair);
            }
            pp[i].inDomain = false;
        } else {
            if ( j_id_local < 0 ) j_id_local = id_map.at(j_id);
            if ( i<j_id_local ) {
#pragma omp critical
                {
                    pair_list.push_back(std::make_pair(i, j_id_local));
                }
            }
        }
    }

    // Consistency check: id_map agrees with particle ids, every neighbour
    // relation is symmetric (each local pair appears exactly once in the
    // partner's list), and the global neighbour count is even.
    // NOTE(review): 'check' is accumulated but the final guard is literally
    // 'if ( false )', so PS::Abort() is unreachable - presumably it should
    // read 'if ( !check )'; TODO confirm intent.
    template <class Tpsys>
    void checkNeighbor(Tpsys & pp) {
        const PS::S32 n_loc = n_list.size();
        bool check = true;
        PS::S32 nei_tot = 0;
        for ( PS::S32 i=0; i<n_loc; i++ ) {
            if ( !pp[i].isDead ) assert ( id_map.at(pp[i].id) == i );
        }
        for ( PS::S32 i=0; i<n_loc; i++ ) {
            PS::S32 n_ngb = n_list.at(i).size();
            //if ( pp[i].neighbor )
            //    std::cout << pp[i].id << "\t";
            nei_tot += n_ngb;
            for ( PS::S32 jj=0; jj<n_ngb; jj++ ) {
                PS::S32 j_id = n_list.at(i).at(jj);
                //if ( pp[i].neighbor )
                //    std::cout << j_id << " ";
                auto itr = id_map.find(j_id);
                if ( itr == id_map.end() ) continue; // neighbour lives on another rank
                PS::S32 j = itr->second;
                PS::S32 n_ngb_j = n_list.at(j).size();
                PS::S32 n_p = 0;
                for ( PS::S32 k=0; k<n_ngb_j; k++ ) {
                    PS::S32 k_id = n_list.at(j).at(k);
                    auto itr1 = id_map.find(k_id);
                    if ( itr1 == id_map.end() ) continue;
                    if ( (itr1->second) == i ) n_p ++ ;
                }
                if ( n_p != 1 ) {
                    std::cout << i << "\t" << pp[i].id << "\t" << j << "\t" << j_id << std::endl;
                    std::cout << "Neighbor of " << pp[i].id << ": ";
                    for (PS::S32 k=0; k<n_list.at(i).size(); k++) std::cout << n_list.at(i).at(k) << "\t";
                    std::cout << std::endl;
                    std::cout << "Neighbor of " << j_id << ": ";
                    for (PS::S32 k=0; k<n_list.at(j).size(); k++) std::cout << n_list.at(j).at(k) << "\t";
                    std::cout << std::endl;
                    check = check && false;
                    check = check && false;
                }
            }
            //if ( pp[i].neighbor )
            //    std::cout << std::endl;
        }
        PS::S32 nei_tot_glb = PS::Comm::getSum(nei_tot);
        assert ( nei_tot_glb%2 == 0 );
        if ( false ) {
            PS::Abort();
        }
    }

    // Collect the ranks we actually share cross-rank pairs with.
    void createConnectedRankList(){
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        connected_list.clear();
        for ( PS::S32 i=0; i<n_proc; i++ ) {
            if ( ex_data[i].size() ) {
                connected_list.push_back(i);
                assert( i != PS::Comm::getRank() );
            }
        }
    }

    // Rebuild id_map (particle id -> local index) skipping dead particles.
    template <class Tpsys>
    void makeIdMap(Tpsys & pp){
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        id_map.clear();
        //assert( (PS::S32)(n_list.size()) == n_loc );
        for(PS::S32 i=0; i<n_loc; i++){
            //assert( pp[i].neighbor == (PS::S32)(n_list[i].size()) );
            if ( !pp[i].isDead ) {
                id_map[pp[i].id] = i;
            }
        }
    }

#if 1
    // Label-propagation over pair_list until a fixed point: every particle
    // in a connected component ends with the component's minimum id as its
    // id_cluster. Then any particle whose cluster touches a cross-rank pair
    // is marked !inDomain.
    template <class Tpsys>
    void createNeighborCluster(Tpsys & pp){
        //const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        const PS::S32 n_wngb = with_neighbor_list.size();
        const PS::S32 n_pair = pair_list.size();
        bool check = true;
        while( check ){
            check = false;
#pragma omp parallel for reduction (||:check)
            for(PS::S32 ii=0; ii<n_pair; ii++){
                PS::S32 i = pair_list.at(ii).first;
                PS::S32 j = pair_list.at(ii).second;
                if ( pp[i].id_cluster != pp[j].id_cluster ) {
#pragma omp critical
                    {
                        pp[i].id_cluster = pp[j].id_cluster = std::min(pp[i].id_cluster, pp[j].id_cluster);
                    }
                    check = check || true;
                }
            }
        }
        if( ex_list.size() != 0 ){
            PS::S32 n_out = ex_list.size();
#pragma omp parallel for
            for(PS::S32 ii=0; ii<n_wngb; ii++){
                PS::S32 i = with_neighbor_list.at(ii);
                for(PS::S32 j=0; j<n_out; j++){
                    PS::S32 i_out = id_map.at(ex_list.at(j).first);
                    PS::S32 id_cluster_out = pp[i_out].id_cluster;
                    if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false;
                }
            }
        }
    }
#else
    // Alternative (disabled) implementation: per-particle minimum over the
    // neighbour lists instead of per-pair propagation.
    template <class Tpsys>
    void createNeighborCluster(Tpsys & pp){
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
        PS::S32 j_id_cluster = 0;
        PS::S32 id_cluster[n_loc];
        bool check = true;
        while( check ){
            check = false;
#pragma omp parallel for
            for(PS::S32 i=0; i<n_loc; i++){
                PS::S32 j_id = 0;
                PS::S32 nei = 0;
                nei = pp[i].neighbor;
                id_cluster[i] = pp[i].id_cluster;
                if(nei == 0) continue;
                for(PS::S32 j=0; j<nei; j++){
                    auto itr = id_map.find(n_list[i].at(j));
                    if ( itr == id_map.end() ) continue;
                    j_id = itr->second;
                    j_id_cluster = pp[j_id].id_cluster;
                    if( id_cluster[i] > j_id_cluster ) id_cluster[i] = j_id_cluster;
                }
            }
#pragma omp parallel for reduction (||:check)
            for(PS::S32 i=0; i<n_loc; i++){
                if ( pp[i].id_cluster != id_cluster[i] ) {
                    check = check || true;
                    pp[i].id_cluster = id_cluster[i];
                }
                assert( pp[i].id >= id_cluster[i] );
            }
        }
        if( ex_list.size() != 0 ){
            PS::S32 n_out = ex_list.size();
#pragma omp parallel for
            for(PS::S32 i=0; i<n_loc; i++){
                for(PS::S32 j=0; j<n_out; j++){
                    PS::S32 i_out = id_map.at(ex_list.at(j).first);
                    PS::S32 id_cluster_out = pp[i_out].id_cluster;
                    if( pp[i].id_cluster == id_cluster_out ) pp[i].inDomain = false;
                }
            }
        }
    }
#endif

    // Copy the (now converged) local cluster ids into the ExPair records and
    // merge records that belong to the same cluster so each carries the full
    // rank bitset known locally.
    template <class Tpsys>
    void inputExData(Tpsys & pp){
        const PS::S32 n_out = ex_list.size();
        for ( PS::S32 j=0; j<n_out; j++ ){
            std::pair<PS::S32,PS::S32> pair = ex_list.at(j);
            std::pair<PS::S32,PS::S32> ex_adr = ex_adr_list.at(j);
            assert( getExData(ex_adr).getId() == pair.first );
            getExData(ex_adr).setIdCluster(pp[id_map.at(pair.first)].id_cluster);
            for ( PS::S32 k=0; k<n_out; k++ ){
                if ( k == j ) continue;
                //std::pair<PS::S32,PS::S32> pair2 = ex_list.at(k);
                std::pair<PS::S32,PS::S32> ex_adr2 = ex_adr_list.at(k);
                if ( getExData(ex_adr2).getIdCluster() == getExData(ex_adr).getIdCluster() ) {
                    getExData(ex_adr).exchange(getExData(ex_adr2));
                }
            }
        }
    }

    // One round of pairwise ExPair exchange with every connected rank
    // (symmetric Isend/Irecv per rank, caller supplies pre-sized buffers).
    // Returns true if any local record changed, i.e. another round is needed
    // before cluster ids / rank bitsets are globally consistent.
    template <class Tpsys>
    bool exchangeExData(Tpsys & pp,
                        PS::S32 TAG,
                        PS::S32** & ex_data_send,
                        PS::S32** & ex_data_recv){
        //const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        const PS::S32 n_send = connected_list.size();
        //PS::S32 ** ex_data_send = new PS::S32*[n_send];
        //PS::S32 ** ex_data_recv = new PS::S32*[n_send];
        //for ( PS::S32 ii=0; ii<n_send; ii++ ) {
        //    PS::S32 i = connected_list.at(ii);
        //    PS::S32 n_size = ex_data[i].size() * ExPair::getSize();
        //    ex_data_send[ii] = new PS::S32[n_size];
        //    ex_data_recv[ii] = new PS::S32[n_size];
        //}
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = connected_list.at(ii);
            PS::S32 n_data = ex_data[i].size();
            PS::S32 jj = 0;
            for ( PS::S32 j=0; j<n_data; j++ ) {
                jj += ex_data[i][j].output(&ex_data_send[ii][jj]);
            }
        }

#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = connected_list.at(ii);
            PS::S32 n_size = ex_data[i].size() * ExPair::getSize();
            MPI_Isend(&ex_data_send[ii][0], n_size, PS::GetDataType(*ex_data_send[ii]), i, TAG, MPI_COMM_WORLD, &req0[ii]);
            MPI_Irecv(&ex_data_recv[ii][0], n_size, PS::GetDataType(*ex_data_recv[ii]), i, TAG, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
#else
        assert ( n_send == 0 );
#endif

        bool check = false;
#pragma omp parallel for reduction (||:check)
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = connected_list.at(ii);
            PS::S32 n_data = ex_data[i].size();
            PS::S32 jj = 0;
            for ( PS::S32 j=0; j<n_data; j++ ) {
                ExPair recv_pair;
                jj += recv_pair.input(&ex_data_recv[ii][jj]);
                // input() swapped id_in/id_out, so getPair() is already in
                // this rank's orientation and addresses our own record.
                std::pair<PS::S32,PS::S32> adr = ex_data_map.at(recv_pair.getPair());
                assert ( adr.first == i );
                assert ( recv_pair.getPair() == getExData(adr).getPair() );
                bool check_1 = getExData(adr).exchange(recv_pair);
                check = check || check_1;
                //getExData(adr).show();
#pragma omp critical
                {
                    PS::S32 i_loc = id_map.at(getExData(adr).getId());
                    pp[i_loc].id_cluster = std::min(pp[i_loc].id_cluster, getExData(adr).getIdCluster());
                }
            }
            //delete [] ex_data_send[ii];
            //delete [] ex_data_recv[ii];
        }
        //delete [] ex_data_send;
        //delete [] ex_data_recv;

        //PS::Comm::barrier();
        //bool check_glb = PS::Comm::synchronizeConditionalBranchOR(check);
        return check;
    }

    // Decide, per globally-agreed cluster, which ranks send their particles
    // where: the minimum participating rank hosts the cluster and receives
    // from all higher participating ranks; everyone else sends to it.
    template <class Tpsys>
    void selectSendRecvParticle(Tpsys & pp){
        const PS::S32 myrank = PS::Comm::getRank();
        const PS::S32 n_proc = PS::Comm::getNumberOfProc();
        const PS::S32 n_ptcl = ex_list.size();
        std::vector<PS::S32> ex_cluster;
        std::vector<std::pair<PS::S32,PS::S32> > ex_cluster_adr;
        ex_cluster.clear();
        ex_cluster_adr.clear();
        for ( PS::S32 ii=0; ii<n_ptcl; ii++ ) {
            //std::pair<PS::S32,PS::S32> pair = ex_list.at(ii);
            std::pair<PS::S32,PS::S32> adr = ex_adr_list.at(ii);
            PS::S32 id_cluster = getExData(adr).id_cluster;

            // Have we already handled this cluster id?
            PS::S32 n_l = ex_cluster.size();
            std::pair<PS::S32,PS::S32> adr2 = std::make_pair(-1,-1);
            for (PS::S32 j=0; j<n_l; j++){
                if ( id_cluster == ex_cluster.at(j) ){
                    adr2 = ex_cluster_adr.at(j);
                    assert( getExData(adr).equalFlag(getExData(adr2)) );
                }
            }
            if ( adr2 == std::make_pair(-1,-1) ){
                ex_cluster.push_back(id_cluster);
                ex_cluster_adr.push_back(adr);

                PS::S32 min_rank = getExData(adr).getMinFlag();
                if ( min_rank == myrank ) {
                    for ( PS::S32 j=0; j<n_proc; j++ ) {
                        if ( getExData(adr).checkFlag(j) ) {
                            if ( j == myrank ) continue;
                            recv_list[j].push_back(id_cluster);
                            assert ( j > myrank );
                        }
                    }
                } else {
                    assert ( min_rank < myrank );
                    send_list[min_rank].push_back(id_cluster);
                }
            }
        }
        for ( PS::S32 i=0; i<n_proc; i++ ) {
            if ( recv_list[i].size() ) recv_rank_list.push_back(i);
            if ( send_list[i].size() ) send_rank_list.push_back(i);
        }
    }

private:
    // Non-copyable: the class owns large per-rank containers.
    void operator =(const NeighborList& NL){}
    NeighborList(const NeighborList& NL) {}
};


/* Staging area for sending cross-domain cluster particles (of type Tp) to
 * their host rank, running the work there, and returning the updated
 * particles. Count exchange, particle exchange and the return trip use
 * distinct MPI tag offsets (TAG..TAG+5) so the phases cannot cross-match.
 */
template <class Tp>
class ExParticleSystem {
public :
    PS::S32 n_send;                 // number of ranks we send to
    PS::S32 n_recv;                 // number of ranks we receive from
    PS::S32 n_ex_ptcl_send_tot;     // totals over all ranks
    PS::S32 n_ex_nei_send_tot;
    PS::S32 n_ex_ptcl_recv_tot;
    PS::S32 n_ex_nei_recv_tot;
    std::vector<Tp> ex_ptcl_send;               // flattened outgoing particles
    std::vector<PS::S32> ex_nei_send;           // flattened outgoing neighbour ids
    std::vector<Tp> ex_ptcl_recv;               // flattened incoming particles
    std::vector<PS::S32> ex_nei_recv;           // flattened incoming neighbour ids
    std::vector<std::vector<PS::S32> > ex_ptcl_send_list; // per-rank local indices to send
    std::vector<PS::S32*> n_list;               // per received particle: pointer into ex_nei_recv
    std::vector<PS::S32> n_ex_ptcl_send;        // per-rank counts and offsets
    std::vector<PS::S32> n_ex_nei_send;
    std::vector<PS::S32> n_ex_ptcl_recv;
    std::vector<PS::S32> n_ex_nei_recv;
    std::vector<PS::S32> adr_ex_ptcl_send;
    std::vector<PS::S32> adr_ex_nei_send;
    std::vector<PS::S32> adr_ex_ptcl_recv;
    std::vector<PS::S32> adr_ex_nei_recv;

    Tp & operator[](PS::S32 i){ return ex_ptcl_recv[i]; }
    PS::S32 getNumberOfParticleLocal() const { return n_ex_ptcl_recv_tot; }

    void initialize() {
        n_send = n_recv = 0;
        n_ex_ptcl_send_tot = n_ex_ptcl_recv_tot = 0;
        n_ex_nei_send_tot = n_ex_nei_recv_tot = 0;
        ex_ptcl_send.clear();
        ex_nei_send.clear();
        ex_ptcl_recv.clear();
        ex_nei_recv.clear();
        ex_ptcl_send_list.clear();
        n_ex_ptcl_send.clear();
        n_ex_nei_send.clear();
        n_ex_ptcl_recv.clear();
        n_ex_nei_recv.clear();
        adr_ex_ptcl_send.clear();
        adr_ex_nei_send.clear();
        adr_ex_ptcl_recv.clear();
        adr_ex_nei_recv.clear();
    }

    // Size the per-rank count/offset arrays for n_send0 destination ranks
    // and n_recv0 source ranks.
    void resize(PS::S32 n_send0,
                PS::S32 n_recv0){
        n_send = n_send0;
        n_ex_ptcl_send.resize(n_send);
        n_ex_nei_send.resize(n_send);
        adr_ex_ptcl_send.resize(n_send);
        adr_ex_nei_send.resize(n_send);
        ex_ptcl_send_list.resize(n_send);
#pragma omp parallel for
        for ( PS::S32 i=0; i<n_send; i++ ) ex_ptcl_send_list[i].clear();

        n_recv = n_recv0;
        n_ex_ptcl_recv.resize(n_recv);
        n_ex_nei_recv.resize(n_recv);
        adr_ex_ptcl_recv.resize(n_recv);
        adr_ex_nei_recv.resize(n_recv);
    }

    PS::S32 getNumberOfParticleSend() const { return n_ex_ptcl_send_tot; }
    PS::S32 getNumberOfParticleRecv() const { return n_ex_ptcl_recv_tot; }
    PS::S32 getNumberOfNeighborSend() const { return n_ex_nei_send_tot; }
    PS::S32 getNumberOfNeighborRecv() const { return n_ex_nei_recv_tot; }

    // Count, per destination rank, how many out-of-domain particles (and
    // neighbour entries) belong to a cluster listed in NList.send_list, and
    // remember their local indices in ex_ptcl_send_list.
    template <class Tpsys>
    void inputNumberOfExParticleSend(Tpsys & pp,
                                     NeighborList & NList){
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) n_ex_ptcl_send[ii] = n_ex_nei_send[ii] = 0;
        if ( n_send ) {
#pragma omp parallel for
            for ( PS::S32 i=0; i<n_loc; i++) {
                if ( !pp[i].inDomain ) {
                    for ( PS::S32 jj=0; jj<n_send; jj++ ){
                        PS::S32 j = NList.send_rank_list[jj];
                        PS::S32 n_data = NList.send_list[j].size();
                        for ( PS::S32 k=0; k<n_data; k++ ) {
                            if ( NList.send_list[j][k] == pp[i].id_cluster ) {
#pragma omp critical
                                {
                                    n_ex_ptcl_send[jj] ++;
                                    n_ex_nei_send[jj] += pp[i].neighbor;
                                    assert ( pp[i].neighbor == (PS::S32)(NList.n_list[i].size()) );
                                    ex_ptcl_send_list[jj].push_back(i);
                                }
                            }
                        }
                    }
                }
            }
        }
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) assert( ex_ptcl_send_list[ii].size() );
    }

    // Exchange per-rank particle and neighbour counts (tags TAG, TAG+1).
    void sendRecvNumberOfExParticle(NeighborList & NList,
                                    PS::S32 TAG = 0){
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = NList.send_rank_list[ii];
            MPI_Isend(&n_ex_ptcl_send[ii], 1, PS::GetDataType(n_ex_ptcl_send[0]), i, TAG, MPI_COMM_WORLD, &req0[ii]);
            MPI_Isend(&n_ex_nei_send[ii], 1, PS::GetDataType(n_ex_nei_send[0]), i, TAG+1, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Request req2[n_recv], req3[n_recv];
        MPI_Status stat2[n_recv], stat3[n_recv];
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 i = NList.recv_rank_list[ii];
            MPI_Irecv(&n_ex_ptcl_recv[ii], 1, PS::GetDataType(n_ex_ptcl_recv[0]), i, TAG, MPI_COMM_WORLD, &req2[ii]);
            MPI_Irecv(&n_ex_nei_recv[ii], 1, PS::GetDataType(n_ex_nei_recv[0]), i, TAG+1, MPI_COMM_WORLD, &req3[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
        MPI_Waitall(n_recv, req2, stat2);
        MPI_Waitall(n_recv, req3, stat3);
#endif
    }

    // Prefix-sum the per-rank counts into flat-buffer offsets and size the
    // flattened send/recv buffers accordingly.
    void inputAdress(){
        n_ex_ptcl_send_tot = n_ex_nei_send_tot = 0;
        for (PS::S32 i=0; i<n_send; i++){
            adr_ex_ptcl_send.at(i) = n_ex_ptcl_send_tot;
            adr_ex_nei_send.at(i) = n_ex_nei_send_tot;
            n_ex_ptcl_send_tot += n_ex_ptcl_send.at(i);
            n_ex_nei_send_tot += n_ex_nei_send.at(i);
        }
        n_ex_ptcl_recv_tot = n_ex_nei_recv_tot = 0;
        for (PS::S32 i=0; i<n_recv; i++){
            adr_ex_ptcl_recv.at(i) = n_ex_ptcl_recv_tot;
            adr_ex_nei_recv.at(i) = n_ex_nei_recv_tot;
            n_ex_ptcl_recv_tot += n_ex_ptcl_recv.at(i);
            n_ex_nei_recv_tot += n_ex_nei_recv.at(i);
        }
        ex_ptcl_send.resize(n_ex_ptcl_send_tot);
        ex_nei_send.resize(n_ex_nei_send_tot);
        ex_ptcl_recv.resize(n_ex_ptcl_recv_tot);
        ex_nei_recv.resize(n_ex_nei_recv_tot);
        n_list.resize(n_ex_ptcl_recv_tot);
    }

    // Pack the selected particles (marking them isSent) and their neighbour
    // id lists into the flat send buffers.
    template <class Tpsys>
    void inputExParticleSend(Tpsys & pp,
                             NeighborList & NList){
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 n_data = n_ex_ptcl_send.at(ii);
            PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii);
            PS::S32 adr_nei = adr_ex_nei_send.at(ii);
            PS::S32 n_nei = 0;
            for ( PS::S32 jj=0; jj<n_data; jj++ ) {
                PS::S32 j = ex_ptcl_send_list[ii].at(jj);
                pp[j].isSent = true;
                ex_ptcl_send.at(adr_ptcl + jj) = pp[j];
                assert( !pp[j].inDomain );
                for ( PS::S32 k=0; k<pp[j].neighbor; k++ ) {
                    ex_nei_send.at(adr_nei + n_nei) = NList.n_list[j].at(k);
                    n_nei ++;
                }
            }
            assert ( n_ex_nei_send.at(ii) == n_nei );
        }
    }

    // Ship packed particles and neighbour lists to their host ranks
    // (tags TAG+2, TAG+3).
    void sendRecvExParticle(NeighborList & NList,
                            PS::S32 TAG = 0){
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = NList.send_rank_list[ii];
            MPI_Isend(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+2, MPI_COMM_WORLD, &req0[ii]);
            MPI_Isend(&ex_nei_send[adr_ex_nei_send[ii]], n_ex_nei_send[ii], PS::GetDataType(ex_nei_send[0]), i, TAG+3, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Request req2[n_recv], req3[n_recv];
        MPI_Status stat2[n_recv], stat3[n_recv];
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 i = NList.recv_rank_list[ii];
            MPI_Irecv(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii], PS::GetDataType(ex_ptcl_recv[0]), i, TAG+2, MPI_COMM_WORLD, &req2[ii]);
            MPI_Irecv(&ex_nei_recv[adr_ex_nei_recv[ii]], n_ex_nei_recv[ii], PS::GetDataType(ex_nei_recv[0]), i, TAG+3, MPI_COMM_WORLD, &req3[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
        MPI_Waitall(n_recv, req2, stat2);
        MPI_Waitall(n_recv, req3, stat3);
#endif
    }

    // Rebuild n_list for received particles: each entry points at that
    // particle's slice of ex_nei_recv (slice length = its neighbour count).
    void inputNeighborListOfExParticleRecv() {
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 n_data = n_ex_ptcl_recv.at(ii);
            PS::S32 adr_ptcl = adr_ex_ptcl_recv.at(ii);
            PS::S32 n_nei = adr_ex_nei_recv.at(ii);
            for ( PS::S32 jj=0; jj<n_data; jj++ ) {
                n_list.at(adr_ptcl + jj) = &(ex_nei_recv.at(n_nei));
                n_nei += ex_ptcl_recv.at(adr_ptcl + jj).neighbor;
                assert ( ex_ptcl_recv.at(adr_ptcl + jj).isSent );
            }
            if ( ii+1<n_recv ) assert ( adr_ex_nei_recv.at(ii+1) == n_nei );
        }
    }

    // Reverse trip: hosts send the processed particles back (tags TAG+4,
    // TAG+5); the send buffers are reused as receive buffers here.
    void returnExParticle(NeighborList & NList,
                          PS::S32 TAG = 0){
#ifdef PARTICLE_SIMULATOR_MPI_PARALLEL
        MPI_Request req0[n_send], req1[n_send];
        MPI_Status stat0[n_send], stat1[n_send];
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 i = NList.send_rank_list[ii];
            MPI_Irecv(&ex_ptcl_send[adr_ex_ptcl_send[ii]], n_ex_ptcl_send[ii], PS::GetDataType(ex_ptcl_send[0]), i, TAG+4, MPI_COMM_WORLD, &req0[ii]);
            MPI_Irecv(&ex_nei_send[adr_ex_nei_send[ii]], n_ex_nei_send[ii], PS::GetDataType(ex_nei_send[0]), i, TAG+5, MPI_COMM_WORLD, &req1[ii]);
        }
        MPI_Request req2[n_recv], req3[n_recv];
        MPI_Status stat2[n_recv], stat3[n_recv];
        for ( PS::S32 ii=0; ii<n_recv; ii++ ) {
            PS::S32 i = NList.recv_rank_list[ii];
            MPI_Isend(&ex_ptcl_recv[adr_ex_ptcl_recv[ii]], n_ex_ptcl_recv[ii], PS::GetDataType(ex_ptcl_recv[0]), i, TAG+4, MPI_COMM_WORLD, &req2[ii]);
            MPI_Isend(&ex_nei_recv[adr_ex_nei_recv[ii]], n_ex_nei_recv[ii], PS::GetDataType(ex_nei_recv[0]), i, TAG+5, MPI_COMM_WORLD, &req3[ii]);
        }
        MPI_Waitall(n_send, req0, stat0);
        MPI_Waitall(n_send, req1, stat1);
        MPI_Waitall(n_recv, req2, stat2);
        MPI_Waitall(n_recv, req3, stat3);
#endif
    }

    // Copy the returned particles back over the local originals; ids must
    // survive the round trip for live particles.
    template <class Tpsys>
    void outputExParticleSend(Tpsys & pp,
                              NeighborList & NList){
#pragma omp parallel for
        for ( PS::S32 ii=0; ii<n_send; ii++ ) {
            PS::S32 n_data = n_ex_ptcl_send.at(ii);
            PS::S32 adr_ptcl = adr_ex_ptcl_send.at(ii);
            for ( PS::S32 jj=0; jj<n_data; jj++ ) {
                PS::S32 j = ex_ptcl_send_list[ii].at(jj);
                PS::S32 id_pre = pp[j].id;
                pp[j] = ex_ptcl_send.at(adr_ptcl + jj);
                if (!pp[j].isDead) assert( pp[j].id == id_pre );
            }
        }
    }
};
pack_tril.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include "stdlib.h"
#include <complex.h>
#include "config.h"
#include "np_helper.h"

/*
 * Fill one triangle of the n x n matrix `mat` from the other triangle.
 * The iteration pattern comes from the TRIU_LOOP macro in np_helper.h
 * (j0/j1 are scratch indices used internally by that macro).
 * HERMITIAN/SYMMETRIC input is mirrored as-is; anything else is treated
 * as anti-symmetric (sign flipped).
 */
void NPdsymm_triu(int n, double *mat, int hermi)
{
        size_t i, j, j0, j1;

        if (hermi == HERMITIAN || hermi == SYMMETRIC) {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = mat[j*n+i];
                }
        } else {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = -mat[j*n+i];
                }
        }
}

/*
 * Complex counterpart of NPdsymm_triu: conjugate for HERMITIAN, plain
 * copy for SYMMETRIC, negated conjugate otherwise (anti-hermitian).
 */
void NPzhermi_triu(int n, double complex *mat, int hermi)
{
        size_t i, j, j0, j1;

        if (hermi == HERMITIAN) {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = conj(mat[j*n+i]);
                }
        } else if (hermi == SYMMETRIC) {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = mat[j*n+i];
                }
        } else {
                TRIU_LOOP(i, j) {
                        mat[i*n+j] = -conj(mat[j*n+i]);
                }
        }
}

/*
 * Expand packed lower-triangular data `tril` (n*(n+1)/2 elements, row by
 * row) into the lower triangle of the dense n x n matrix `mat`.  When
 * hermi != 0 the upper triangle is also filled via NPdsymm_triu.
 */
void NPdunpack_tril(int n, double *tril, double *mat, int hermi)
{
        size_t i, j, ij;
        for (ij = 0, i = 0; i < n; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        mat[i*n+j] = tril[ij];
                }
        }
        if (hermi) {
                NPdsymm_triu(n, mat, hermi);
        }
}

// unpack one row from the compact matrix-tril coefficients
void NPdunpack_row(int ndim, int row_id, double *tril, double *row)
{
        int i;
        size_t idx = (size_t)row_id * (row_id + 1) / 2;

        /* the first row_id elements lie contiguously in the packed storage */
        NPdcopy(row, tril+idx, row_id);
        /* the rest sit at position row_id of every later packed row */
        for (i = row_id; i < ndim; i++) {
                idx += i;
                row[i] = tril[idx];
        }
}

/* Complex analogue of NPdunpack_tril (upper triangle via NPzhermi_triu). */
void NPzunpack_tril(int n, double complex *tril, double complex *mat,
                    int hermi)
{
        size_t i, j, ij;
        for (ij = 0, i = 0; i < n; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        mat[i*n+j] = tril[ij];
                }
        }
        if (hermi) {
                NPzhermi_triu(n, mat, hermi);
        }
}

/* Pack the lower triangle of the dense n x n matrix `mat` into `tril`. */
void NPdpack_tril(int n, double *tril, double *mat)
{
        size_t i, j, ij;
        for (ij = 0, i = 0; i < n; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        tril[ij] = mat[i*n+j];
                }
        }
}

/* Complex analogue of NPdpack_tril. */
void NPzpack_tril(int n, double complex *tril, double complex *mat)
{
        size_t i, j, ij;
        for (ij = 0, i = 0; i < n; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        tril[ij] = mat[i*n+j];
                }
        }
}

/* out += in[idx[:,None],idy]
 * (despite the "+=" note above, this routine assigns: out[i,j] = in[idx[i],idy[j]]) */
void NPdtake_2d(double *out, double *in, int *idx, int *idy,
                int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
        shared(out, in, idx,idy, odim, idim, nx, ny)
{
        size_t i, j;
        double *pin;
#pragma omp for schedule (static)
        for (i = 0; i < nx; i++) {
                pin = in + (size_t)idim * idx[i];
                for (j = 0; j < ny; j++) {
                        out[i*odim+j] = pin[idy[j]];
                }
        }
}
}

/* Complex analogue of NPdtake_2d. */
void NPztake_2d(double complex *out, double complex *in, int *idx, int *idy,
                int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
        shared(out, in, idx,idy, odim, idim, nx, ny)
{
        size_t i, j;
        double complex *pin;
#pragma omp for schedule (static)
        for (i = 0; i < nx; i++) {
                pin = in + (size_t)idim * idx[i];
                for (j = 0; j < ny; j++) {
                        out[i*odim+j] = pin[idy[j]];
                }
        }
}
}

/* out[idx[:,None],idy] += in
 * thread_safe != 0 parallelizes over rows; callers must then guarantee the
 * idx entries are distinct, otherwise concurrent += would race. */
void NPdtakebak_2d(double *out, double *in, int *idx, int *idy,
                   int odim, int idim, int nx, int ny, int thread_safe)
{
        if (thread_safe) {
#pragma omp parallel default(none) \
        shared(out, in, idx,idy, odim, idim, nx, ny)
{
                size_t i, j;
                double *pout;
#pragma omp for schedule (static)
                for (i = 0; i < nx; i++) {
                        pout = out + (size_t)odim * idx[i];
                        for (j = 0; j < ny; j++) {
                                pout[idy[j]] += in[i*idim+j];
                        }
                }
}
        } else {
                size_t i, j;
                double *pout;
                for (i = 0; i < nx; i++) {
                        pout = out + (size_t)odim * idx[i];
                        for (j = 0; j < ny; j++) {
                                pout[idy[j]] += in[i*idim+j];
                        }
                }
        }
}

/* Complex analogue of NPdtakebak_2d. */
void NPztakebak_2d(double complex *out, double complex *in, int *idx, int *idy,
                   int odim, int idim, int nx, int ny, int thread_safe)
{
        if (thread_safe) {
#pragma omp parallel default(none) \
        shared(out, in, idx,idy, odim, idim, nx, ny)
{
                size_t i, j;
                double complex *pout;
#pragma omp for schedule (static)
                for (i = 0; i < nx; i++) {
                        pout = out + (size_t)odim * idx[i];
                        for (j = 0; j < ny; j++) {
                                pout[idy[j]] += in[i*idim+j];
                        }
                }
}
        } else {
                size_t i, j;
                double complex *pout;
                for (i = 0; i < nx; i++) {
                        pout = out + (size_t)odim * idx[i];
                        for (j = 0; j < ny; j++) {
                                pout[idy[j]] += in[i*idim+j];
                        }
                }
        }
}

/* Unpack `count` stacked packed-tril matrices in parallel.
 * NOTE(review): nn/n2 are computed from int products before widening to
 * size_t; for very large n the multiplication itself overflows int first. */
void NPdunpack_tril_2d(int count, int n, double *tril, double *mat, int hermi)
{
#pragma omp parallel default(none) \
        shared(count, n, tril, mat, hermi)
{
        int ic;
        size_t nn = n * n;
        size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
        for (ic = 0; ic < count; ic++) {
                NPdunpack_tril(n, tril+n2*ic, mat+nn*ic, hermi);
        }
}
}

/* Complex analogue of NPdunpack_tril_2d. */
void NPzunpack_tril_2d(int count, int n,
                       double complex *tril, double complex *mat, int hermi)
{
#pragma omp parallel default(none) \
        shared(count, n, tril, mat, hermi)
{
        int ic;
        size_t nn = n * n;
        size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
        for (ic = 0; ic < count; ic++) {
                NPzunpack_tril(n, tril+n2*ic, mat+nn*ic, hermi);
        }
}
}

/* Pack `count` stacked dense matrices into packed-tril form in parallel. */
void NPdpack_tril_2d(int count, int n, double *tril, double *mat)
{
#pragma omp parallel default(none) \
        shared(count, n, tril, mat)
{
        int ic;
        size_t nn = n * n;
        size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
        for (ic = 0; ic < count; ic++) {
                NPdpack_tril(n, tril+n2*ic, mat+nn*ic);
        }
}
}

/* Complex analogue of NPdpack_tril_2d. */
void NPzpack_tril_2d(int count, int n, double complex *tril,
                     double complex *mat)
{
#pragma omp parallel default(none) \
        shared(count, n, tril, mat)
{
        int ic;
        size_t nn = n * n;
        size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
        for (ic = 0; ic < count; ic++) {
                NPzpack_tril(n, tril+n2*ic, mat+nn*ic);
        }
}
}
parallel.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Fill `count` consecutive elements of `array`, starting at `start`,
 * with a sentinel value.
 *
 * Fixes vs. the original: the return type was `const static void` (a
 * qualifier on void is meaningless and draws a compiler warning), and the
 * third parameter was misleadingly named `end` although it is used as an
 * element count, not an end index.
 */
static void subdomain(float *array, int start, int count)
{
    for (int i = 0; i < count; i++)
        array[start + i] = 123.456f;
}

/*
 * Partition `array` of length `n` across the OpenMP threads: each thread
 * fills one contiguous chunk; the last thread also takes the remainder
 * when n is not divisible by the thread count.
 */
static void sub(float *array, int n)
{
#pragma omp parallel default(shared)
    {
        int this_thread = omp_get_thread_num();
        int n_threads = omp_get_num_threads();
        /* size of partition */
        int chunk = n / n_threads;
        /* starting array index */
        int start = this_thread * chunk;
        /* last thread may do more */
        if (this_thread == n_threads - 1)
            chunk = n - start;
        printf("#%d out of %d\tstarting at: %d with chunck size of it: %d\n",
               this_thread, n_threads, start, chunk);
        subdomain(array, start, chunk);
    }
}

int main(int argc, char **argv)
{
    unsigned int n = 1000;
    /* size the allocation from n (was hard-coded to 1000) and check it */
    float *array = malloc(sizeof *array * n);

    if (array == NULL) {
        fprintf(stderr, "allocation of %u floats failed\n", n);
        return EXIT_FAILURE;
    }
    sub(array, n);
    free(array);
    return 0;
}
vtp_fmt_plug.c
/*
 * Cracker for MD5 based authentication in VTP.
 *
 * This software is Copyright (c) 2014 Alexey Lapitsky <lex at
 * realisticgroup.com> and Dhiru Kholia <dhiru at openwall.com>, and it is
 * hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_vtp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_vtp);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
// Tuned on core i7 4-core HT
// 64 - 19k
// 128 - 27k
// 256 - 30.5k ** chosen **
// 512 - 30.5k
// 1k - 28.5k
// 2k - 28.5k (times wobble)
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 4096
#else
#define OMP_SCALE 256
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP

#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL            "vtp"
#define FORMAT_NAME             "\"MD5 based authentication\" VTP"
#define FORMAT_TAG              "$vtp$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        55 // keep under 1 MD5 block AND this is now tied into logic in vtp_secret_derive()
#define BINARY_SIZE             16
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(int)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define HEXCHARS                "0123456789abcdef"

// Self-test vectors: $vtp$<version>$<vlans-len>$<vlans-hex>$<salt-len>$<salt-hex>$<md5-hash>
static struct fmt_tests tests[] = {
	{"$vtp$2$196$14000107000105dc000186a164656661756c740014000105000505dc000186a56368656e6100000010000103000605dc000186a6666666001800020c03ea05dc00018a8a666464692d64656661756c743000030d03eb117800018a8b74726372662d64656661756c7400000001010ccc040103ed0701000208010007090100072000040f03ec05dc00018a8c666464696e65742d64656661756c7400030100012400050d03ed117800018a8d74726272662d64656661756c740000000201000f03010002$80$0201010c646f6d61696e313233343536000000000000000000000000000000000000000000000015000000003134313030393134333631376010913064949d6f47a53b2ad68ef06b0000000106010002$6010913064949d6f47a53b2ad68ef06b", "123"},
	{"$vtp$1$184$14000107000105dc000186a164656661756c740014000105000505dc000186a568656c6c6f0000002000020c03ea05dc00018a8a666464692d64656661756c7401010000040100002800031203eb05dc00018a8b746f6b656e2d72696e672d64656661756c74000001010000040100002400040f03ec05dc00018a8c666464696e65742d64656661756c740002010000030100012400050d03ed05dc00018a8d74726e65742d64656661756c740000000201000003010002$77$0101010c646f6d61696e313233343536000000000000000000000000000000000000000000000010000000003134313030393134313432372212dd93025abc600281d74ddda8a21c0101000200$2212dd93025abc600281d74ddda8a21c", "123"},
	{NULL}
};

// Per-candidate buffers (sized for max_keys_per_crypt in init()).
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static unsigned char (*secret)[16];       // derived per-password "secret" cache
static int *saved_len, dirty;             // dirty: keys changed since last derive
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

/* VTP summary advertisement packet, partially based on original Yersinia code */
typedef struct {
	unsigned char version;
	unsigned char code;
	unsigned char followers;
	unsigned char domain_name_length;
	unsigned char domain_name[32];       // zero padded
	uint32_t revision;                   // 4 bytes
	uint32_t updater;                    // 4 bytes
	unsigned char update_timestamp[12];  // zero'ed during MAC calculations
	unsigned char md5_checksum[16];
} vtp_summary_packet;

static struct custom_salt {
	int length;
	vtp_summary_packet vsp;
	int vlans_data_length;
	unsigned char vlans_data[8192];
	int salt_length;
	unsigned char salt[2048];
	int trailer_length;
	int version;
	unsigned char trailer_data[64];
} *cur_salt;

// Allocate the per-candidate arrays; scale key count by OMP_SCALE threads.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	saved_len = mem_calloc(sizeof(*saved_len), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
	secret = mem_calloc(sizeof(*secret), self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(secret);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

// Validate one ciphertext line; returns 1 if it parses as a vtp hash.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *ptrkeep;
	int res;

	p = ciphertext;
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	ptrkeep = strdup(ciphertext);
	p = &ptrkeep[TAG_LENGTH];

	if ((p = strtokm(p, "$")) == NULL) /* version */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res != 1 && res != 2) // VTP version 3 support is pending
		goto err;          // FIXME: fprintf(stderr, ... for version 3?
	if ((p = strtokm(NULL, "$")) == NULL) /* vlans len */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* vlans data */
		goto err;
	if (strlen(p) / 2 != res)  // hex length must match declared byte count
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt len */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* salt */
		goto err;
	if (strlen(p) / 2 != res)
		goto err;
	if (!ishexlc(p))
		goto err;
	// byte 3 of the salt is the domain-name length; bound it
	if (((atoi16[ARCH_INDEX(p[6])]<<4)|atoi16[ARCH_INDEX(p[7])]) > sizeof(cur_salt->vsp.domain_name))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* hash */
		goto err;
	if (strlen(p) != BINARY_SIZE * 2)
		goto err;
	if (!ishexlc(p))
		goto err;

	MEM_FREE(ptrkeep);
	return 1;

err:
	MEM_FREE(ptrkeep);
	return 0;
}

// Parse the ciphertext into a custom_salt (valid() has vetted the layout).
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i;
	char *p, *q;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	p = ciphertext;
	cs.version = atoi(p);
	q = p + 2;
	cs.vlans_data_length = atoi(q);
	q = strchr(q, '$') + 1; // at vlans_data
	for (i = 0; i < cs.vlans_data_length; i++)
		cs.vlans_data[i] = (atoi16[ARCH_INDEX(q[2 * i])] << 4) | atoi16[ARCH_INDEX(q[2 * i + 1])];
	q = strchr(q, '$') + 1; // at salt_length
	cs.salt_length = atoi(q);
	q = strchr(q, '$') + 1; // at salt
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(q[2 * i])] << 4) | atoi16[ARCH_INDEX(q[2 * i + 1])];
	if (cs.salt_length > 72) { /* we have trailing bytes */
		cs.trailer_length = cs.salt_length - 72;
		memcpy(cs.trailer_data, cs.salt + 72, cs.trailer_length);
	}
	cs.vsp.version = cs.salt[0]; // based on Wireshark
	cs.vsp.code = cs.salt[1];
	// Zero out various fields for MAC calculation
	cs.vsp.followers = 0;
	memset(cs.vsp.update_timestamp, 0, 12);
	memset(cs.vsp.md5_checksum, 0, 16);
	// fill rest of the data
	cs.vsp.domain_name_length = cs.salt[3];
	if (cs.vsp.domain_name_length > sizeof(cs.vsp.domain_name))
		cs.vsp.domain_name_length = sizeof(cs.vsp.domain_name);
	memcpy(cs.vsp.domain_name, cs.salt + 4, cs.vsp.domain_name_length);
	memcpy((unsigned char*)&cs.vsp.revision, cs.salt + 36, 4);
	memcpy((unsigned char*)&cs.vsp.updater, cs.salt + 36 + 4, 4);

	return (void*)&cs;
}

// Decode the trailing hex MD5 into a 16-byte binary.
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

// Derive the 16-byte VTP "secret": MD5 over ~1 MB of the password repeated
// cyclically (1563 blocks of 64 bytes).
static void vtp_secret_derive(char *password, int length, unsigned char *output)
{
#if 0
	/* old code kept as a easier to read view of what is being done */
	MD5_CTX ctx;
	unsigned char *cp, buf[64];
	unsigned int password_idx = 0;
	int i, j;

	if (length == 0) {
		memset(output, 0, 16);
		return;
	}
	MD5_Init(&ctx);
	for (i = 0; i < 1563; i++) { /* roughly 1 MB */
		cp = buf;
		for (j = 0; j < 64; j++) /* treat password as a cyclic generator */
			*cp++ = password[password_idx++ % length];
		MD5_Update(&ctx, buf, 64);
	}
	MD5_Final(output, &ctx);
#else
	// Speed went from 8k to 28k. I think it should be VERY easy to add SIMD code here.
	// That would gain us another 4x or so speed. TODO for someone to play with ;)
	MD5_CTX ctx;
	unsigned char *cp, buf[55][64];
	int bufs_used = 0, local_cnt = 0;
	int i, j;

	if (length == 0) {
		memset(output, 0, 16);
		return;
	}
	cp = buf[bufs_used];
	/* treat password as a cyclic generator */
	for (;;) {
		/* note this WILL exit. Modular math assures it will do so in 'length' buffers
		   or less. With PLAINTEXT_LENGTH set to 55 bytes, we only need 55 buffers
		   to assure a cycle */
		if (local_cnt + length <= 64) {
			memcpy(&cp[local_cnt], password, length);
			local_cnt += length;
			if (local_cnt == 64) {
				/* we ended a word at end of buffer, so we have the cycle */
				bufs_used++;
				break;
			}
		} else {
			int spill = local_cnt+length-64;
			memcpy(&cp[local_cnt], password, length-spill);
			cp = buf[++bufs_used];
			memcpy(cp, &password[length-spill], spill);
			local_cnt = spill;
		}
	}
	MD5_Init(&ctx);
	for (i = 0, j=0; i < 1563; ++i) { /* roughly 1 MB */
		MD5_Update(&ctx, buf[j++], 64);
		if (j == bufs_used)
			j = 0;
	}
	MD5_Final(output, &ctx);
#endif
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

// MAC = MD5(secret || summary packet || [trailer for v2+] || vlans || secret)
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;

		// space for (secret + SUMMARY ADVERTISEMENT + VLANS DATA + secret)
		// derive and append "secret", but do it only the FIRST time for a password (not for extra salts).
		if (dirty)
			vtp_secret_derive(saved_key[index], saved_len[index], secret[index]);
		MD5_Init(&ctx);
		MD5_Update(&ctx, secret[index], 16);
		// append vtp_summary_packet
		MD5_Update(&ctx, &cur_salt->vsp, sizeof(vtp_summary_packet));
		// add trailing bytes (for VTP version >= 2)
		if (cur_salt->version != 1)
			MD5_Update(&ctx, cur_salt->trailer_data, cur_salt->trailer_length);
		// append vlans_data
		MD5_Update(&ctx, cur_salt->vlans_data, cur_salt->vlans_data_length);
		// append "secret" again
		MD5_Update(&ctx, secret[index], 16);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	dirty = 0;
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

// Store a candidate password and mark the secret cache stale.
static void vtp_set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
	dirty = 1;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_vtp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		vtp_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
VerletClusterCells.h
/** * @file VerletClusterCells.h * @author jspahl * @date 25.3.19 */ #pragma once #include <algorithm> #include <cmath> #include <vector> #include "autopas/cells/FullParticleCell.h" #include "autopas/containers/CellBorderAndFlagManager.h" #include "autopas/containers/ParticleContainer.h" #include "autopas/containers/cellPairTraversals/CellPairTraversal.h" #include "autopas/containers/verletClusterLists/VerletClusterCellsParticleIterator.h" #include "autopas/containers/verletClusterLists/traversals/VerletClusterTraversalInterface.h" #include "autopas/iterators/ParticleIterator.h" #include "autopas/iterators/RegionParticleIterator.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/CudaDeviceVector.h" namespace autopas { /** * Particles are divided into clusters. * The VerletClusterCells class uses neighborhood lists for each cluster pair * to calculate pairwise interactions. * It is optimized for a constant, i.e. particle independent, cutoff radius of * the interaction. * @tparam Particle */ template <class Particle> class VerletClusterCells : public ParticleContainer<FullParticleCell<Particle>> { public: /** * Constructor of the VerletClusterCells class. * The neighbor lists are build using an estimated density. * The box is divided into cuboids with roughly the * same side length. The rebuildFrequency should be chosen, s.t. the particles do * not move more than a distance of skin/2 between two rebuilds of the lists. 
* @param boxMin the lower corner of the domain * @param boxMax the upper corner of the domain * @param cutoff the cutoff radius of the interaction * @param skin the skin radius * @param clusterSize size of clusters */ VerletClusterCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff, double skin = 0, int clusterSize = 32) : ParticleContainer<FullParticleCell<Particle>>(boxMin, boxMax, cutoff, skin), _boxMinWithHalo(utils::ArrayMath::subScalar(boxMin, cutoff + skin)), _boxMaxWithHalo(utils::ArrayMath::addScalar(boxMax, cutoff + skin)), _clusterSize(clusterSize), _isValid(false) { this->_cells.resize(1); _dummyStarts = {0}; } ContainerOption getContainerType() const override { return ContainerOption::verletClusterCells; } /** * Function to iterate over all pairs of particles. * This function only handles short-range interactions. * @param traversal to be used used */ void iteratePairwise(TraversalInterface *traversal) override { auto *traversalInterface = dynamic_cast<VerletClusterTraversalInterface<FullParticleCell<Particle>> *>(traversal); auto *cellPairTraversal = dynamic_cast<CellPairTraversal<FullParticleCell<Particle>> *>(traversal); if ((!traversalInterface) or (!cellPairTraversal)) { autopas::utils::ExceptionHandler::exception( "trying to use a traversal of wrong type in VerletClusterCells::iteratePairwise"); } traversalInterface->setVerletListPointer(&_neighborCellIds, &_neighborMatrixDim, &_neighborMatrix); if (traversalInterface->getSignature() != _lastTraversalSig or (not _isValid)) { if (!_isValid) { rebuild(); } traversalInterface->rebuildVerlet(_cellsPerDim, this->_cells, _boundingBoxes, std::ceil(this->getInteractionLength() * _gridSideLengthReciprocal), this->getInteractionLength()); _lastTraversalSig = traversalInterface->getSignature(); } cellPairTraversal->setCellsToTraverse(this->_cells); traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } /** * @copydoc 
VerletLists::addParticle() */ void addParticle(const Particle &p) override { if (autopas::utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) { _isValid = false; // removes dummy particles in first cell this->_cells[0].resize(_dummyStarts[0]); // add particle somewhere, because lists will be rebuild anyways this->_cells[0].addParticle(p); ++_dummyStarts[0]; } else { utils::ExceptionHandler::exception( "VerletCluster: trying to add particle that is not inside the bounding box.\n" + p.toString()); } } /** * @copydoc VerletLists::addHaloParticle() */ void addHaloParticle(const Particle &haloParticle) override { Particle p_copy = haloParticle; if (autopas::utils::notInBox(p_copy.getR(), this->getBoxMin(), this->getBoxMax())) { _isValid = false; // removes dummy particles in first cell this->_cells[0].resize(_dummyStarts[0]); p_copy.setOwned(false); // add particle somewhere, because lists will be rebuild anyways this->_cells[0].addParticle(p_copy); ++_dummyStarts[0]; } else { utils::ExceptionHandler::exception( "VerletCluster: trying to add halo particle that is inside the bounding box.\n" + haloParticle.toString()); } } /** * Update a halo particle of the container with the given haloParticle. * @param haloParticle Particle to be updated. * @return Returns true if the particle was updated, false if no particle could be found. */ bool updateHaloParticle(const Particle &haloParticle) override { Particle pCopy = haloParticle; pCopy.setOwned(false); for (auto it = getRegionIterator(utils::ArrayMath::subScalar(pCopy.getR(), this->getSkin() / 2), utils::ArrayMath::addScalar(pCopy.getR(), this->getSkin() / 2), IteratorBehavior::haloOnly); it.isValid(); ++it) { if (pCopy.getID() == it->getID()) { *it = pCopy; return true; } } return false; } /** * Rebuilds the neighbor lists. * @param traversal The used traversal. 
*/ void rebuildNeighborLists(TraversalInterface *traversal) override { auto *traversalInterface = dynamic_cast<VerletClusterTraversalInterface<FullParticleCell<Particle>> *>(traversal); if (!traversalInterface) { autopas::utils::ExceptionHandler::exception( "trying to use a traversal of wrong type in VerletClusterCells::iteratePairwise"); } if (not _isValid) { rebuild(); } traversalInterface->setVerletListPointer(&_neighborCellIds, &_neighborMatrixDim, &_neighborMatrix); traversalInterface->rebuildVerlet(_cellsPerDim, this->_cells, _boundingBoxes, std::ceil(this->getInteractionLength() * _gridSideLengthReciprocal), this->getInteractionLength()); _lastTraversalSig = traversalInterface->getSignature(); } /** * @copydoc VerletLists::deleteHaloParticles */ void deleteHaloParticles() override { _isValid = false; for (size_t i = 0; i < this->_cells.size(); ++i) { for (size_t j = 0; j < _dummyStarts[i];) { if (not this->_cells[i]._particles[j].isOwned()) { // set position outside the domain with other dummy particles auto pos = this->_cells[i]._particles[j].getR(); pos[0] += _boxMaxWithHalo[2] + 8 * this->getInteractionLength(); this->_cells[i]._particles[j].setR(pos); // one more dummy particle --_dummyStarts[i]; // swap last non dummy particle with the halo particle to remove std::swap(this->_cells[i]._particles[j], this->_cells[i]._particles[_dummyStarts[i]]); } else { // move on if no halo particle was removed ++j; } } } } /** * @copydoc VerletLists::updateContainer() */ std::vector<Particle> updateContainer() override { AutoPasLog(debug, "updating container"); deleteHaloParticles(); std::vector<Particle> outsideParticles; for (auto iter = begin(autopas::IteratorBehavior::ownedOnly); iter.isValid(); ++iter) { if (utils::notInBox(iter->getR(), this->getBoxMin(), this->getBoxMax())) { outsideParticles.push_back(*iter); internal::deleteParticle(iter); } } return outsideParticles; } bool isContainerUpdateNeeded() const override { if (not _isValid) { return true; } for 
(size_t i = 0; i < this->_cells.size(); ++i) { size_t pid = 0; const size_t end = (_boundingBoxes[i].size() > 0) ? _boundingBoxes[i].size() - 1 : 0; for (size_t cid = 0; cid < end; ++cid) { for (unsigned int pic = 0; pic < _clusterSize; ++pic) { if (not particleInSkinOfBox(_boundingBoxes[i][cid], this->_cells[i][pid])) { return true; } ++pid; } } for (unsigned int pic = 0; pic < _clusterSize && pid < _dummyStarts[i]; ++pic) { if (not particleInSkinOfBox(_boundingBoxes[i][_boundingBoxes[i].size() - 1], this->_cells[i][pid])) { return true; } ++pid; } } return false; } TraversalSelectorInfo getTraversalSelectorInfo() const override { return TraversalSelectorInfo(_cellsPerDim, this->getInteractionLength(), {_gridSideLength, _gridSideLength, this->getBoxMax()[2] - this->getBoxMin()[2]}, _clusterSize); } ParticleIteratorWrapper<Particle, true> begin(IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { return ParticleIteratorWrapper<Particle, true>( new internal::VerletClusterCellsParticleIterator<Particle, FullParticleCell<Particle>, true>( &this->_cells, _dummyStarts, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior)); } ParticleIteratorWrapper<Particle, false> begin( IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { return ParticleIteratorWrapper<Particle, false>( new internal::VerletClusterCellsParticleIterator<Particle, FullParticleCell<Particle>, false>( &this->_cells, _dummyStarts, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior)); } ParticleIteratorWrapper<Particle, true> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { // Special iterator requires sorted cells #ifdef AUTOPAS_OPENMP #pragma omp single #endif if (not _isValid) { rebuild(); } // there is an implicit barrier at end of single! 
// restrict search area to the region where particles are const auto lowerCornerInBounds = utils::ArrayMath::max(lowerCorner, _boxMinWithHalo); const auto upperCornerInBounds = utils::ArrayMath::min(higherCorner, _boxMaxWithHalo); // Find cells intersecting the search region size_t xmin = (size_t)((lowerCornerInBounds[0] - _boxMinWithHalo[0] - this->getSkin()) * _gridSideLengthReciprocal); size_t ymin = (size_t)((lowerCornerInBounds[1] - _boxMinWithHalo[1] - this->getSkin()) * _gridSideLengthReciprocal); size_t xlength = ((size_t)((upperCornerInBounds[0] - _boxMinWithHalo[0] + this->getSkin()) * _gridSideLengthReciprocal) - xmin) + 1; size_t ylength = ((size_t)((upperCornerInBounds[1] - _boxMinWithHalo[1] + this->getSkin()) * _gridSideLengthReciprocal) - ymin) + 1; std::vector<size_t> cellsOfInterest(xlength * ylength); auto cellsOfInterestIterator = cellsOfInterest.begin(); int start = xmin + ymin * _cellsPerDim[0]; for (size_t i = 0; i < ylength; ++i) { std::iota(cellsOfInterestIterator, cellsOfInterestIterator + xlength, start + i * _cellsPerDim[0]); cellsOfInterestIterator += xlength; } return ParticleIteratorWrapper<Particle, true>( new internal::VerletClusterCellsRegionParticleIterator<Particle, FullParticleCell<Particle>, true>( &this->_cells, _dummyStarts, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior, this->getSkin())); } ParticleIteratorWrapper<Particle, false> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { // restrict search area to the region where particles are const auto lowerCornerInBounds = utils::ArrayMath::max(lowerCorner, _boxMinWithHalo); const auto upperCornerInBounds = utils::ArrayMath::min(higherCorner, _boxMaxWithHalo); // Special iterator requires sorted cells. // Otherwise all cells are traversed with the general Iterator. 
if (_isValid) { // Find cells intersecting the search region size_t xmin = (size_t)((lowerCornerInBounds[0] - _boxMinWithHalo[0] - this->getSkin()) * _gridSideLengthReciprocal); size_t ymin = (size_t)((lowerCornerInBounds[1] - _boxMinWithHalo[1] - this->getSkin()) * _gridSideLengthReciprocal); size_t xlength = (((upperCornerInBounds[0] - _boxMinWithHalo[0] + this->getSkin()) * _gridSideLengthReciprocal) - xmin) + 1; size_t ylength = (((upperCornerInBounds[1] - _boxMinWithHalo[1] + this->getSkin()) * _gridSideLengthReciprocal) - ymin) + 1; std::vector<size_t> cellsOfInterest(xlength * ylength); auto cellsOfInterestIterator = cellsOfInterest.begin(); int start = xmin + ymin * _cellsPerDim[0]; for (size_t i = 0; i < ylength; ++i) { std::iota(cellsOfInterestIterator, cellsOfInterestIterator + xlength, start + i * _cellsPerDim[0]); cellsOfInterestIterator += xlength; } return ParticleIteratorWrapper<Particle, false>( new internal::VerletClusterCellsRegionParticleIterator<Particle, FullParticleCell<Particle>, false>( &this->_cells, _dummyStarts, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior, this->getSkin())); } else { // check all cells // As dummy particles are outside the domain they are only found if the search region is outside the domain. std::vector<size_t> cellsOfInterest(this->_cells.size()); std::iota(cellsOfInterest.begin(), cellsOfInterest.end(), 0); return ParticleIteratorWrapper<Particle, false>( new internal::RegionParticleIterator<Particle, FullParticleCell<Particle>, false>( &this->_cells, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest, nullptr, behavior)); } } /** * Get the number of particles excluding dummy Particles saved in the container. * @return Number of particles in the container. 
*/ unsigned long getNumParticles() const override { size_t numParticles = 0ul; #ifdef AUTOPAS_OPENMP // @todo: find a sensible value for magic number // numThreads should be at least 1 and maximal max_threads int numThreads = std::max(1, std::min(omp_get_max_threads(), (int)(this->_cells.size() / 1000))); AutoPasLog(trace, "Using {} threads", numThreads); #pragma omp parallel for num_threads(numThreads) reduction(+ : numParticles) #endif for (size_t index = 0; index < _dummyStarts.size(); ++index) { numParticles += _dummyStarts[index]; } return numParticles; } /** * Deletes all particles from the container. */ void deleteAllParticles() override { _isValid = false; std::fill(_dummyStarts.begin(), _dummyStarts.end(), 0); ParticleContainer<FullParticleCell<Particle>>::deleteAllParticles(); } /** * Deletes all Dummy Particles in the container */ void deleteDummyParticles() { for (size_t i = 0; i < this->_cells.size(); ++i) { this->_cells[i].resize(_dummyStarts[i]); } _isValid = false; } protected: /** * Recalculate grids and clusters, * build verlet lists and pad clusters. 
 * @return Vector of particles containing particles no longer in the box
 */
std::vector<Particle> rebuild() {
  deleteDummyParticles();
  _boundingBoxes.clear();

  // get the dimensions and volumes of the box
  std::array<double, 3> boxSize{};
  double volume = 1.0;
  for (int d = 0; d < 3; ++d) {
    boxSize[d] = _boxMaxWithHalo[d] - _boxMinWithHalo[d];
    volume *= boxSize[d];
  }

  // get all particles and clear clusters
  // invalidParticles: everything that stays in this container (in-box particles
  // plus halo particles outside the box); outsideParticles: owned particles
  // that left the box and are returned to the caller.
  std::vector<Particle> invalidParticles;
  std::vector<Particle> outsideParticles;
  for (size_t i = 0; i < this->_cells.size(); ++i) {
    for (auto &p : this->_cells[i]._particles) {
      if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) {
        invalidParticles.push_back(p);
      } else {
        if (p.isOwned()) {
          outsideParticles.push_back(p);
        } else {
          invalidParticles.push_back(p);
        }
      }
    }
    this->_cells[i].clear();
  }

  // estimate particle density (max(1, N) avoids a zero side length for an
  // empty container)
  double density = (std::max(1.0, (double)invalidParticles.size())) / volume;

  // guess optimal grid side length so that a grid column of height
  // _gridSideLength holds about _clusterSize particles on average
  _gridSideLength = std::cbrt(((double)_clusterSize) / density);
  _gridSideLengthReciprocal = 1 / _gridSideLength;

  // get cells per dimension; the grid is 2D (x,y only) — each grid cell is a
  // full z-column that is later sliced into clusters along z.
  size_t sizeGrid = 1;
  for (int d = 0; d < 2; d++) {
    _cellsPerDim[d] = static_cast<size_t>(std::ceil(boxSize[d] * _gridSideLengthReciprocal));
    sizeGrid *= _cellsPerDim[d];
  }
  _cellsPerDim[2] = static_cast<size_t>(1);

  // resize to number of grids
  this->_cells.resize(sizeGrid);
  _dummyStarts.clear();
  _dummyStarts.resize(sizeGrid);
  _boundingBoxes.resize(sizeGrid);

  // put particles into grid cells
  // NOTE(review): a particle exactly on the max halo boundary could compute an
  // index one past the grid in x or y — TODO confirm callers guarantee
  // positions strictly below _boxMaxWithHalo.
  for (size_t i = 0; i < invalidParticles.size(); ++i) {
    size_t index = (size_t)((invalidParticles[i].getR()[0] - _boxMinWithHalo[0]) * _gridSideLengthReciprocal) +
                   (size_t)((invalidParticles[i].getR()[1] - _boxMinWithHalo[1]) * _gridSideLengthReciprocal) *
                       _cellsPerDim[0];
    this->_cells[index].addParticle(invalidParticles[i]);
  }

  // sort by last dimension and add dummy particles
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel for schedule(guided)
#endif
  for (size_t i = 0; i < sizeGrid; ++i) {
    this->_cells[i].sortByDim(2);

    const auto numParticles = this->_cells[i].numParticles();
    _dummyStarts[i] = numParticles;

    // Pad the cell so its size is a multiple of _clusterSize.
    // NOTE(review): when numParticles is a nonzero multiple of _clusterSize,
    // numDummys stays _clusterSize and a whole extra dummy-only cluster is
    // appended — presumably intentional padding, but worth confirming.
    unsigned int numDummys = _clusterSize;
    if (numParticles > 0) {
      numDummys -= (numParticles % (size_t)_clusterSize);
    }
    // Dummies are parked far outside the halo box (offset by cell/dummy index
    // so they do not coincide) and marked not-owned with a sentinel ID.
    Particle dummyParticle = Particle();
    for (unsigned int j = 0; j < numDummys; ++j) {
      dummyParticle.setR({_boxMaxWithHalo[0] + 8 * this->getInteractionLength() + static_cast<double>(i),
                          _boxMaxWithHalo[1] + 8 * this->getInteractionLength() + static_cast<double>(j),
                          _boxMaxWithHalo[2] + 8 * this->getInteractionLength()});
      dummyParticle.setID(std::numeric_limits<size_t>::max());
      dummyParticle.setOwned(false);
      this->_cells[i].addParticle(dummyParticle);
    }
  }

  // make bounding boxes
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel for schedule(guided)
#endif
  for (size_t i = 0; i < sizeGrid; ++i) {
    const size_t nClusters = this->_cells[i].numParticles() / _clusterSize;
    // Boxes start inverted (min = boxMax, max = boxMin) so the first expansion
    // snaps them onto the first particle.
    _boundingBoxes[i].resize(nClusters, {_boxMaxWithHalo[0], _boxMaxWithHalo[1], _boxMaxWithHalo[2],
                                         _boxMinWithHalo[0], _boxMinWithHalo[1], _boxMinWithHalo[2]});
    // NOTE(review): the inner loop runs from the cluster's first particle to
    // _dummyStarts[i] (end of ALL real particles), so box cid also absorbs the
    // particles of every later cluster in this cell; expected upper bound
    // would be min((cid + 1) * _clusterSize, _dummyStarts[i]) — TODO confirm.
    for (size_t cid = 0; cid < nClusters; ++cid)
      for (size_t pid = cid * _clusterSize; pid < _dummyStarts[i]; ++pid) {
        expandBoundingBox(_boundingBoxes[i][cid], this->_cells[i][pid]);
      }
  }

  _isValid = true;
  return outsideParticles;
}

private:
/**
 * Expands a bounding Box such the Particle is in it.
 * @param box bounding box as (xmin, ymin, zmin, xmax, ymax, zmax)
 * @param p particle whose position must end up inside the box
 */
void expandBoundingBox(std::array<double, 6> &box, const Particle &p) {
  for (int i = 0; i < 3; ++i) {
    box[i] = std::min(box[i], p.getR()[i]);
    box[3 + i] = std::max(box[3 + i], p.getR()[i]);
  }
}

/**
 * Checks if particle is within skin of bounding box.
 * @param box bounding box as (xmin, ymin, zmin, xmax, ymax, zmax)
 * @param p particle to test
 */
bool particleInSkinOfBox(const std::array<double, 6> &box, const Particle &p) const {
  // Inside the box grown by the verlet skin in every direction.
  for (int i = 0; i < 3; ++i) {
    if (box[0 + i] - this->getSkin() > p.getR()[i] or box[3 + i] + this->getSkin() < p.getR()[i]) return false;
  }
  return true;
}

std::array<double, 3> _boxMinWithHalo, _boxMaxWithHalo;

/// indices where dummy particles in the cells start
std::vector<size_t> _dummyStarts;

// number of particles in a cluster
unsigned int _clusterSize;

// id of neighbor clusters of a clusters in the form [mycell][mycluster] pair(othercell, othercluster)
std::vector<std::vector<std::vector<std::pair<size_t, size_t>>>> _neighborCellIds;

size_t _neighborMatrixDim;
utils::CudaDeviceVector<unsigned int> _neighborMatrix;

// bounding boxes of all clusters (xmin,ymin,zmin,xmax,ymax,zmax)
std::vector<std::vector<std::array<double, 6>>> _boundingBoxes;

// side length of xy-grid and reciprocal
double _gridSideLength;
double _gridSideLengthReciprocal;

// dimensions of grid
std::array<size_t, 3> _cellsPerDim;

// specifies if the neighbor list is currently valid
bool _isValid;

/// Signature of the last Traversal to trigger rebuild when a new one is used
std::tuple<TraversalOption, DataLayoutOption, bool> _lastTraversalSig;
};

}  // namespace autopas
overlap_lib_omp.h
/* ************************************************************************** * This file has various essential routines used by the overlap cluster * algorithm * * Code by Avik Ray (avik@utexas.edu) * **************************************************************************** */ #ifndef OVERLAP_LIB_OMP_H #define OVERLAP_LIB_OMP_H #include <cstdio> #include <cstdlib> #include <iostream> #include <vector> #include <omp.h> #define MALLOC(type, size) (type*)malloc(sizeof(type)*(size)) using namespace std; /* config class */ class config_t{ public: long threshold_init; // Beginning degree threshold long dense_threshold; // Dimension till full SVD is performed long max_cluster_iter; // Maximum iteration in convex clustering subroutine long K_max; // Maximum clusters to be found each iteration long sparse_svd_rank; // Dimension from which sparse SVD is performed int num_threads; // Default number of threads to use bool f_verbose; // Flag to run in verbose mode bool f_log; // Flag to log new clusters each iteration char testID[50]; // Unique string describing the experiment double p; // Value of parameter p double q; // Value of parameter q long gamma; // Value of parameter gamma char edgeListFile[100];// Name of edgelist file long nodes; // Number of nodes long edges; // Number of edges config_t(): threshold_init(1), dense_threshold(250), max_cluster_iter(6), K_max(500), sparse_svd_rank(250), num_threads(1), f_verbose(true), f_log(true), p(0.1), q(0.01), gamma(5), nodes(0), edges(0) {} void init(){ return; } void set_default(){ threshold_init = 1; dense_threshold = 250; max_cluster_iter = 6; K_max = 500; sparse_svd_rank = 250; num_threads = 1; f_verbose = true; f_log = true; strcpy(testID,"001"); p = 0.1; q = 0.01; gamma = 5; strcpy(edgeListFile,"A_test.txt"); nodes = 0; edges = 0; return; } ~config_t(){ } void save(const char *filename){ FILE *fp = fopen(filename,"w"); fprintf(fp,"THRESHOLD_INIT %ld\n",threshold_init); fprintf(fp,"DENSE_THRESHOLD 
%ld\n",dense_threshold); fprintf(fp,"MAX_CLUSTER_ITER %ld\n",max_cluster_iter); fprintf(fp,"K_MAX %ld\n",K_max); fprintf(fp,"SPARSE_SVD_RANK %ld\n",sparse_svd_rank); fprintf(fp,"DEFAULT_NUM_THREADS %d\n",num_threads); fprintf(fp,"VERBOSE_ON %d\n",f_verbose); fprintf(fp,"LOGS_ON %d\n",f_log); fprintf(fp,"TEST_NUMBER %s\n",testID); fprintf(fp,"p %lf\n",p); fprintf(fp,"q %lf\n",q); fprintf(fp,"GAMMA %ld\n",gamma); fprintf(fp,"EDGELIST_FILE %s\n",edgeListFile); fprintf(fp,"NUM_NODES %ld\n",nodes); fprintf(fp,"NUM_EDGES %ld\n",edges); fclose(fp); return; } void print(){ printf("\n *** TEST CONFIG SETTINGS ***"); printf("\nTHRESHOLD_INIT %ld",threshold_init); printf("\nDENSE_THRESHOLD %ld",dense_threshold); printf("\nMAX_CLUSTER_ITER %ld",max_cluster_iter); printf("\nK_MAX %ld",K_max); printf("\nSPARSE_SVD_RANK %ld",sparse_svd_rank); printf("\nDEFAULT_NUM_THREADS %d",num_threads); printf("\nVERBOSE_ON %d",f_verbose); printf("\nLOGS_ON %d",f_log); printf("\nTEST_NUMBER %s",testID); printf("\np %lf",p); printf("\nq %lf",q); printf("\nGAMMA %ld",gamma); printf("\nEDGELIST_FILE %s",edgeListFile); printf("\nNUM_NODES %ld",nodes); printf("\nNUM_EDGES %ld",edges); printf("\n******************************\n"); return; } void load(const char *filename){ char buff[100]; int _f_verbose, _f_log; FILE *fp = fopen(filename,"r"); fscanf(fp,"%s %ld",buff,&threshold_init); fscanf(fp,"%s %ld",buff,&dense_threshold); fscanf(fp,"%s %ld",buff,&max_cluster_iter); fscanf(fp,"%s %ld",buff,&K_max); fscanf(fp,"%s %ld",buff,&sparse_svd_rank); fscanf(fp,"%s %d",buff,&num_threads); fscanf(fp,"%s %d",buff,&_f_verbose); fscanf(fp,"%s %d",buff,&_f_log); fscanf(fp,"%s %s",buff,testID); fscanf(fp,"%s %lf",buff,&p); fscanf(fp,"%s %lf",buff,&q); fscanf(fp,"%s %ld",buff,&gamma); fscanf(fp,"%s %s",buff,edgeListFile); fscanf(fp,"%s %ld",buff,&nodes); fscanf(fp,"%s %ld",buff,&edges); fclose(fp); if(_f_verbose==1){ f_verbose = true; } else{ f_verbose = false; } if(_f_log==1){ f_log = true; } else{ f_log = 
false; } return; } }; struct qNode{ long data; qNode* next; //qNode(long d, qNode* n=NULL): data(d), next(n){} qNode(long d): data(d), next(NULL){} }; class queue_t{ private: qNode* first; qNode* last; public: long length; queue_t() : first(NULL), last(NULL), length(0) {} ~queue_t(){ qNode *temp = first; while(temp!=NULL){ first = first->next; delete temp; temp = first; } } void init() { first = NULL; last = NULL; length = 0; return; } void push(long x){ if(first==NULL) { first=new qNode(x); last=first; } else { last->next=new qNode(x); last=last->next; } length++; } long pop(){ if(first!=NULL){ qNode* temp=first; long val = temp->data; first=first->next; delete temp; length--; return(val); } else{ return(-1); } } void front(){ if(first!=NULL) std::cout << first->data; } bool isempty(){ if(first!=NULL) { return(false); } else { return(true); } } void print() { qNode *temp; printf("\nLength of queue = %ld",length); if(first==NULL) printf("\nQueue empty!"); else { temp = first; while(temp!=NULL) { printf("\n%ld",temp->data); temp = temp->next; } } } void clear() { while(length>0) { long val = pop(); } } }; // Sparse matrix format CSR class smat_t{ public: long rows, cols; long nnz, nedges; double *csr_val; long *csr_row_ptr; long *csr_col_idx; long *inv_map_index; long *original_index; bool mem_alloc_by_me; //----------------------------------------------------- smat_t(): csr_val(NULL), csr_row_ptr(NULL), csr_col_idx(NULL), inv_map_index(NULL), original_index(NULL), mem_alloc_by_me(false), rows(0), cols(0), nnz(0), nedges(0) {} smat_t(smat_t& m){ *this = m; mem_alloc_by_me = false; rows = 0; cols = 0; nnz=0; nedges = 0;} //----------------------------------------------------- void load(const config_t &config) { char fileAname[100]; strcpy(fileAname,config.edgeListFile); // Read dimensions rows = config.nodes; nedges = config.edges; cols = rows; nnz = 2*nedges; printf("\nNumber of nodes = %ld",rows); printf("\nNumber of edges = %ld",nedges); printf("\nLoading data 
..."); // Allocate memory mem_alloc_by_me = true; csr_val = new double[nnz]; csr_col_idx = new long[nnz]; csr_row_ptr = new long[rows+1]; long *temp_csr_row_ptr = new long[rows+1]; memset(csr_row_ptr,0,sizeof(long)*(rows+1)); memset(temp_csr_row_ptr,0,sizeof(long)*(rows+1)); // Read data FILE *fpx = fopen(fileAname, "r"); // Set csr_row_ptr from non-symmetric edge list for(long i=0,r,c; i<nedges; i++){ fscanf(fpx,"%ld %ld", &r, &c); csr_row_ptr[r]++; csr_row_ptr[c]++; temp_csr_row_ptr[r]++; temp_csr_row_ptr[c]++; } for(long r=1; r<=rows; ++r) csr_row_ptr[r] += csr_row_ptr[r-1]; for(long r=1; r<=rows; ++r) temp_csr_row_ptr[r] += temp_csr_row_ptr[r-1]; // Read again to set column indices and values rewind(fpx); long prev_r_pos, prev_c_pos; for(long i=0,r,c; i<nedges; i++){ fscanf(fpx,"%ld %ld", &r, &c); prev_r_pos = temp_csr_row_ptr[r-1]++; prev_c_pos = temp_csr_row_ptr[c-1]++; csr_col_idx[prev_r_pos] = c-1; csr_col_idx[prev_c_pos] = r-1; csr_val[prev_r_pos] = 1; csr_val[prev_c_pos] = 1; } fclose(fpx); // set original index original_index = new long[rows]; for(long i=0; i<rows; i++) original_index[i] = i; printf("\nFile Loaded !"); delete [] temp_csr_row_ptr; } //----------------------------------------------------- void submatrix(long *map_index, long num_nodes, smat_t *A_src) { rows = num_nodes; cols = rows; mem_alloc_by_me = true; csr_row_ptr = new long[rows+1]; memset(csr_row_ptr,0,sizeof(long)*(rows+1)); inv_map_index = new long[num_nodes]; long *idx_flag = new long[A_src->rows]; memset(idx_flag,0,sizeof(long)*A_src->rows); original_index = new long[num_nodes]; // init lookup table for(long i=0; i<num_nodes; i++) { idx_flag[map_index[i]] = i+1; inv_map_index[i] = map_index[i]; original_index[i] = A_src->original_index[map_index[i]]; } long node, r_start, r_end, c_idx; nnz = 0; queue_t q_cidx, q_val; for(long i=0; i<num_nodes; i++) { node = map_index[i]; r_start = A_src->csr_row_ptr[node]; r_end = A_src->csr_row_ptr[node+1]-1; for(long j=r_start; j<=r_end; j++) { 
c_idx = A_src->csr_col_idx[j]; if(idx_flag[c_idx]>0) { nnz++; csr_row_ptr[i+1]++; q_cidx.push(idx_flag[c_idx]-1); q_val.push(A_src->csr_val[j]); } } } for(long r=1; r<=rows; ++r) csr_row_ptr[r] += csr_row_ptr[r-1]; nedges = nnz/2; csr_val = new double[nnz]; csr_col_idx = new long[nnz]; for(long i=0; i<nnz; i++) { csr_col_idx[i] = q_cidx.pop(); csr_val[i] = q_val.pop(); } q_cidx.clear(); q_val.clear(); delete [] idx_flag; return; } //----------------------------------------------------- void print_csr(void) { printf("\n\n>>Printing CSR matrix:\n"); printf("\nNumber of nodes = %ld",rows); printf("\nNumber of edges = %ld",nedges); long r_start, r_end; for(long i=0; i<rows; i++) { //printf("\n%d",csr_row_ptr[i]); r_start = csr_row_ptr[i]; r_end = csr_row_ptr[i+1]-1; for(long j=r_start; j<=r_end; j++) printf("\nrow=%ld, col=%ld",i+1,csr_col_idx[j]+1); } printf("\n\noriginal index"); for(long i=0; i<rows; i++) { printf("%ld,",original_index[i]); } return; } //----------------------------------------------------- long nnz_of_row(int i) const {return (csr_row_ptr[i+1]-csr_row_ptr[i]);} //----------------------------------------------------- //void free(void *ptr) {if(!ptr) ::free(ptr);} ~smat_t(){ if(mem_alloc_by_me) { if(csr_val) delete [] csr_val; if(csr_row_ptr) delete [] csr_row_ptr; if(csr_col_idx) delete [] csr_col_idx; if(inv_map_index) delete [] inv_map_index; if(original_index) delete [] original_index; } } //----------------------------------------------------- void clear_space() { if(csr_val) delete [] csr_val; if(csr_row_ptr) delete [] csr_row_ptr; if(csr_col_idx) delete [] csr_col_idx; if(inv_map_index) delete [] inv_map_index; if(original_index) delete [] original_index; mem_alloc_by_me = false; rows = 0; cols = 0; nnz=0; nedges = 0; } //----------------------------------------------------- /* double* multiply (double *x) { double *y = (double *) malloc(rows * sizeof(double)); #pragma omp parallel for schedule(dynamic,32) for (long i=0; i < rows; i++) { 
double v=0.0; for (long j=row_ptr[i]; j < row_ptr[i+1]; j++) { v += val[j]*x[col_idx[j]]; } y[i] = v; } return y; }*/ }; class comm_t{ public: long K; long K_max; queue_t *q; long max_size; comm_t(long kmax): K(0), K_max(kmax), q(new queue_t[kmax]), max_size(0) { for(long k=0; k<kmax; k++) q[k].init(); } comm_t(){} ~comm_t() { delete [] q; } void clear(){ delete [] q; K = 0; max_size = 0; K_max = 0; return; } void init(long kmax) { K_max = kmax; K = 0; max_size = 0; q = new queue_t[kmax]; return; } void add(long *theta_arr, long *node_idx, long num_node, long num_comm){ if(K+num_comm>K_max) { printf("\n***Error***>> Too many communities !"); return; } long cidx; long *size = new long[num_comm]; memset(size,0,sizeof(long)*num_comm); for(long i=0; i<num_node; i++) { cidx = theta_arr[i]; if(cidx>0) { q[K+cidx-1].push(node_idx[i]); size[cidx-1]++; } } for(long k=0; k<num_comm; k++) { if(size[k]>max_size) max_size = size[k]; } K += num_comm; delete [] size; return; } }; void findConnectedComponents(comm_t &cc, smat_t &A) { long n = A.rows; long *found = new long[n]; memset(found,0,sizeof(long)*n); queue_t q; long r_start, r_end, cidx; long comp_id = 0; for(long i=0; i<n; i++) { if(found[i]>0) { continue; } comp_id++; found[i] = comp_id; q.push(i); long node; while(q.isempty()==false) { node = q.pop(); r_start = A.csr_row_ptr[node]; r_end = A.csr_row_ptr[node+1]-1; for(long j=r_start; j<=r_end; j++) { cidx = A.csr_col_idx[j]; if(found[cidx]==0) { found[cidx] = comp_id; q.push(cidx); } } } } cc.add(found,A.original_index,n,comp_id); delete [] found; return; } bool exploreWhisker(queue_t &q_nodes_visited,long start_node, long start_neighbor, long max_size,smat_t *A){ long n = A->rows; queue_t q; long *found = new long[n]; memset(found,0,sizeof(long)*n); bool isSmall = true; found[start_node] = 1; found[start_neighbor] = 1; q.push(start_node); q_nodes_visited.clear(); // Clear visited queue q_nodes_visited.push(start_node); long node; while(q.isempty()==false){ node = 
q.pop(); long r_start = A->csr_row_ptr[node]; long r_end = A->csr_row_ptr[node+1]-1; for(long j=r_start; j<=r_end; j++) { long cidx = A->csr_col_idx[j]; if(found[cidx]==0) { found[cidx] = 1; q.push(cidx); q_nodes_visited.push(cidx); } } if(q_nodes_visited.length>=max_size){ isSmall = false; break; } } delete [] found; return(isSmall); } void countEdgesToCluster(long *deg, long *cluster, long size, smat_t *A) { long n = A->rows; memset(deg,0,sizeof(long)*n); long r_start, r_end, cidx; long *skip = new long[n]; memset(skip,0,sizeof(long)*n); for(long i=0; i<size; i++) { skip[cluster[i]] = 1; } long edge_count; for(long i=0; i<n; i++) { if(skip[i]==0) { r_start = A->csr_row_ptr[i]; r_end = A->csr_row_ptr[i+1]-1; edge_count = 0; for(long j=r_start; j<=r_end; j++) { cidx = A->csr_col_idx[j]; for(long k=0; k<size; k++) { if(cluster[k]==cidx) edge_count++; } } deg[i] = edge_count; } } delete [] skip; return; } void countEdgesToClusterPl(long *deg, long *cluster, long size, smat_t *A, int numThreads) { long n = A->rows; memset(deg,0,sizeof(long)*n); long *skip = new long[n]; memset(skip,0,sizeof(long)*n); for(long i=0; i<size; i++) { skip[cluster[i]] = 1; } omp_set_num_threads(numThreads); #pragma omp parallel for schedule(dynamic,32) for(long i=0; i<n; i++) { if(skip[i]==0) { long r_start = A->csr_row_ptr[i]; long r_end = A->csr_row_ptr[i+1]-1; long edge_count = 0; for(long j=r_start; j<=r_end; j++) { long cidx = A->csr_col_idx[j]; for(long k=0; k<size; k++) { if(cluster[k]==cidx) edge_count++; } } deg[i] = edge_count; } } delete [] skip; return; } class degree_t{ public: long *deg; bool f_mem_alloc; long max_degree; long *interimMax; degree_t(){f_mem_alloc = false; max_degree=0;} void calcDegree(smat_t *A) { long n = A->rows; deg = new long[n]; f_mem_alloc = true; printf("\nComputing degree ..."); //long rowFirst, rowLast; for(long i=0; i<n; i++) { deg[i] = 0; long rowFirst = A->csr_row_ptr[i]; long rowLast = A->csr_row_ptr[i+1]-1; deg[i] = rowLast - rowFirst + 1; } 
return; } void calcDegreePl(smat_t *A, int numThreads) { long n = A->rows; deg = new long[n]; f_mem_alloc = true; printf("\nComputing degree parallel ..."); omp_set_num_threads(numThreads); #pragma omp parallel for schedule(dynamic,32) for(long i=0; i<n; i++) { deg[i] = 0; long rowFirst = A->csr_row_ptr[i]; long rowLast = A->csr_row_ptr[i+1]-1; deg[i] = rowLast - rowFirst + 1; } printf("\nDegree computed !"); return; } long calcMaxDegree(smat_t *A) { long n = A->rows; max_degree = 0; for(long i=0; i<n; i++) { if(deg[i]>max_degree) max_degree = deg[i]; } return(max_degree); } long calcMaxDegreePl(smat_t *A) { long n = A->rows; if(n<800) return(calcMaxDegree(A)); printf("\nComputing max degree parallel ..."); max_degree = 0; interimMax = new long[200]; memset(interimMax,0,200*sizeof(long)); long step = n/200; #pragma omp parallel for schedule(dynamic,32) for(long i=0;i<200;i++){ long start = i*step; long end= start+step-1; if(end>n-1) end = n-1; for(long j=start; j<=end; j++){ if(deg[j]>interimMax[i]) interimMax[i] = deg[j]; } } // Find global max for(long i=0; i<200; i++){ if(interimMax[i]>max_degree) max_degree = interimMax[i]; } delete [] interimMax; return(max_degree); } long threshold(bool *res, long th, long n) { long num_pure_nodes = 0; for(long i=0; i<n; i++) { if(deg[i]<=th) { res[i] = true; num_pure_nodes++; } else { res[i] = false; } } return(num_pure_nodes); } void clear() { if(deg) delete [] deg; max_degree = 0; f_mem_alloc = false; } ~degree_t() { if(f_mem_alloc){ if(deg) delete [] deg; } } }; long size_vdash(long *vdash, long n) { long size = 0; for(long i=0; i<n; i++) size += vdash[i]; return(size); } #endif /*overlap_lib.h*/
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
sparse_matrix.h
#ifndef SPARSE_MATRIX_H #define SPARSE_MATRIX_H // headers {{{ #include <cstdio> #include <cstdlib> #include <cstring> #include <algorithm> #include <vector> #include <cmath> #include <cstddef> #include <assert.h> #include <omp.h> #include <iostream> #ifdef _MSC_VER #if _MSC_VER >= 1600 #include <cstdint> #else typedef __int8 int8_t; typedef __int16 int16_t; typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; #endif #endif #if __cplusplus >= 201103L || (defined(_MSC_VER) && (_MSC_VER >= 1500)) // Visual Studio 2008 #define CPP11 #endif /* random number genrator: simulate the interface of python random module*/ #include <limits> #if defined(CPP11) #include <random> template<typename engine_t=std::mt19937> struct random_number_generator : public engine_t { // {{{ typedef typename engine_t::result_type result_type; random_number_generator(unsigned seed=0): engine_t(seed){ } result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; } template<class T=double, class T2=double> T uniform(T start=0.0, T2 end=1.0) { return std::uniform_real_distribution<T>(start, (T)end)(*this); } template<class T=double> T normal(T mean=0.0, T stddev=1.0) { return std::normal_distribution<T>(mean, stddev)(*this); } template<class T=int, class T2=T> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) { return std::uniform_int_distribution<T>(start, end)(*this); } template<class RandIter> void shuffle(RandIter first, RandIter last) { std::shuffle(first, last, *this); } }; #else #include <tr1/random> template<typename engine_t=std::tr1::mt19937> struct random_number_generator : public engine_t { typedef typename engine_t::result_type result_type; random_number_generator(unsigned seed=0): engine_t(seed) { } result_type operator()() { return engine_t::operator()(); } result_type operator()(result_type n) { return 
randint(result_type(0), result_type(n-1)); } result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; } template<class T, class T2> T uniform(T start=0.0, T2 end=1.0) { typedef std::tr1::uniform_real<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,(T)end))(); } template<class T, class T2> T normal(T mean=0.0, T2 stddev=1.0) { typedef std::tr1::normal_distribution<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(mean, (T)stddev))(); } template<class T, class T2> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) { typedef std::tr1::uniform_int<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,end))(); } template<class RandIter> void shuffle(RandIter first, RandIter last) { std::random_shuffle(first, last, *this); } }; // }}} #endif typedef random_number_generator<> rng_t; template<typename T> void gen_permutation_pair(size_t size, std::vector<T> &perm, std::vector<T> &inv_perm, int seed=0) { // {{{ perm.resize(size); for(size_t i = 0; i < size; i++) perm[i] = i; rng_t rng(seed); rng.shuffle(perm.begin(), perm.end()); //std::srand(seed); //std::random_shuffle(perm.begin(), perm.end()); inv_perm.resize(size); for(size_t i = 0; i < size; i++) inv_perm[perm[i]] = i; } // }}} //#include "zlib_util.h" // }}} #define MALLOC(type, size) (type*)malloc(sizeof(type)*(size)) #define CALLOC(type, size) (type*)calloc((size), sizeof(type)) #define REALLOC(ptr, type, size) (type*)realloc((ptr), sizeof(type)*(size)) //namespace rofu { typedef unsigned major_t; const major_t ROWMAJOR = 0U; const major_t COLMAJOR = 1U; const major_t default_major = COLMAJOR; // Zip Iterator // Commom usage: std::sort(zip_iter(A.begin(),B.begin()), zip_iter(A.end(),B.end())); template<class T1, class T2> struct zip_body; template<class T1, class T2> struct zip_ref; template<class IterT1, class IterT2> struct zip_it; template<class IterT1, class IterT2> 
zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y); #define dvec_t dense_vector template<typename val_type> class dvec_t; #define dmat_t dense_matrix template<typename val_type> class dmat_t; #define smat_t sparse_matrix template<typename val_type> class smat_t; #define eye_t identity_matrix template<typename val_type> class eye_t; #define gmat_t general_matrix template<typename val_type> class gmat_t { // {{{ public: size_t rows, cols; gmat_t(size_t rows=0, size_t cols=0): rows(rows), cols(cols){} virtual bool is_sparse() const {return false;} virtual bool is_dense() const {return false;} virtual bool is_identity() const {return false;} smat_t<val_type>& get_sparse() {assert(is_sparse()); return static_cast<smat_t<val_type>&>(*this);} const smat_t<val_type>& get_sparse() const {assert(is_sparse()); return static_cast<const smat_t<val_type>&>(*this);} dmat_t<val_type>& get_dense() {assert(is_dense()); return static_cast<dmat_t<val_type>&>(*this);} const dmat_t<val_type>& get_dense() const {assert(is_dense()); return static_cast<const dmat_t<val_type>&>(*this);} }; // }}} template<typename val_type> class entry_iterator_t; // iterator for files with (i,j,v) tuples template<typename val_type> class smat_iterator_t; // iterator for nonzero entries in smat_t template<typename val_type> class smat_subset_iterator_t; // iterator for nonzero entries in a subset template<typename val_type> class dmat_iterator_t; // iterator for nonzero entries in dmat_t // H = X*W, (X: m*n, W: n*k row-major, H m*k row major) template<typename val_type> void smat_x_dmat(const smat_t<val_type> &X, const val_type* W, const size_t k, val_type *H); template<typename val_type> void smat_x_dmat(const smat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H); template<typename val_type> void gmat_x_dmat(const gmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H); // H = a*X*W + H0, (X: m*n, W: n*k row-major, H m*k row major) template<typename val_type, typename T2> void 
smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type* W, const size_t k, const val_type *H0, val_type *H); template<typename val_type, typename T2> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H); // H = a*X*W + b*H0, (X: m*n, W: n*k row-major, H m*k row major) template<typename val_type, typename T2, typename T3> void smat_x_dmat(T2 a, const smat_t<val_type>& X, const val_type *W, const size_t k, T3 b, const val_type *H0, val_type *H); template<typename val_type, typename T2, typename T3> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2, typename T3> void gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); // trace(W'*X*H) template<typename val_type> val_type trace_dmat_T_smat_dmat(const dmat_t<val_type> &W, const smat_t<val_type> &X, const dmat_t<val_type> &H); // Dense Vector template<typename val_type> class dvec_t { // {{{ friend class dmat_t<val_type>; private: bool mem_alloc_by_me; void zero_init() {len = 0; buf = NULL; mem_alloc_by_me = false;} public: size_t len; val_type *buf; // Default Constructor dvec_t() {zero_init();} // Copy Constructor dvec_t(const dvec_t& v) { // {{{ zero_init(); *this = v; } // }}} // Copy Assignment dvec_t& operator=(const dvec_t& other) { // {{{ if(this == &other) return *this; if(other.is_view()) { // view to view copy if(mem_alloc_by_me) clear_space(); memcpy(this, &other, sizeof(dvec_t)); } else { // deep to deep copy resize(other.size()); memcpy(buf, other.buf, sizeof(val_type)*len); } return *this; } // }}} // View Constructor: allocate space if buf == NULL explicit dvec_t(size_t len, val_type *buf=NULL): len(len), buf(buf), mem_alloc_by_me(false) { // {{{ if(buf == NULL && len != 0) { this->buf = MALLOC(val_type, len); memset(this->buf, 0, 
sizeof(val_type)*len); mem_alloc_by_me = true; } } // }}} // Fill Constructor explicit dvec_t(size_t len, const val_type &x) {zero_init();resize(len,x);} // dense_matrix_t Converter dvec_t(const dmat_t<val_type>& m) { // {{{ //puts("dvect dmat convert ctor"); zero_init(); if(m.is_view()) {len=m.rows*m.cols; buf=m.buf;} else { resize(m.rows*m.cols); memcpy(buf, m.buf, sizeof(val_type)*len); } } // }}} #if defined(CPP11) // Move Constructor dvec_t(dvec_t&& m){ zero_init(); *this = std::move(m);} // Move Assignment dvec_t& operator=(dvec_t&& other) { // {{{ if(this == &other) return *this; clear_space(); memcpy(this, &other, sizeof(dvec_t)); other.zero_init(); return *this; } // }}} #endif ~dvec_t() {clear_space(); } bool is_view() const {return mem_alloc_by_me==false;} void clear_space() {if(mem_alloc_by_me) free(buf); zero_init();} dvec_t get_view() const {return dvec_t(len, buf);} dvec_t& grow_body() { // {{{ if(is_view()) { dvec_t tmp_view = *this; this->resize(len); memcpy(buf, tmp_view.buf, sizeof(val_type)*len); } return *this; } // }}} dvec_t& assign(const dvec_t& other) { // {{{ assert(len == other.len); return assign((val_type)1.0, other); } // }}} template<typename T> dvec_t& assign(T a, const dvec_t& other) { // {{{ assert(len == other.len); if(a == T(0)) memset(buf, 0, sizeof(val_type)*len); else if(a == T(1)) { if(this == &other) return *this; #pragma omp parallel for schedule(static) for(size_t idx = 0; idx < len; idx++) at(idx) = other.at(idx); } else { #pragma omp parallel for schedule(static) for(size_t idx = 0; idx < len; idx++) at(idx) = a*other.at(idx); } return *this; } // }}} size_t size() const {return len;}; void resize(size_t len_, const val_type &x) { // {{{ resize(len_); for(size_t i = 0; i < len; i++) buf[i] = x; } // }}} void resize(size_t len_) { // {{{ if(mem_alloc_by_me) buf = REALLOC(buf, val_type, len_); else buf = MALLOC(val_type, len_); mem_alloc_by_me = true; len = len_; } // }}} val_type& at(size_t idx) {return buf[idx];} const 
val_type& at(size_t idx) const {return buf[idx];} val_type& operator[](size_t idx) {return buf[idx];} const val_type& operator[](size_t idx) const {return buf[idx];} val_type* data() {return buf;} const val_type* data() const {return buf;} void print(const char *str="") const { printf("%s dvec_t: len %d, is_view %d, buf %p\n", str, len, is_view(), buf); for(size_t i = 0; i < len; i ++) printf("%g ", buf[i]); puts(""); } }; // }}} // Dense Matrix template<typename val_type> class dmat_t : public gmat_t<val_type> { // {{{ friend class dvec_t<val_type>; public: // size_t rows, cols; inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; val_type *buf; static dmat_t rand(rng_t &rng, size_t m, size_t n, double lower=0.0, double upper=1.0, major_t major_type_=default_major) { // {{{ dmat_t ret(m, n, major_type_); if(lower >= upper) lower = upper; for(size_t idx = 0; idx < m*n; idx++) ret.buf[idx] = (val_type)rng.uniform(lower, upper); return ret; } // }}} static dmat_t randn(rng_t &rng, size_t m, size_t n, double mean=0.0, double std=1.0, major_t major_type_=default_major) { // {{{ dmat_t ret(m, n, major_type_); for(size_t idx = 0; idx < m*n; idx++) ret.buf[idx] = (val_type)rng.normal(mean, std); return ret; } // }}} private: bool mem_alloc_by_me; major_t major_type; typedef dvec_t<val_type> vec_t; std::vector<vec_t> vec_set; // view for each row/col depending on the major_type; void zero_init() {rows=cols=0; buf=NULL; major_type=default_major; mem_alloc_by_me=false; vec_set.clear();} void init_vec_set() { // {{{ if(is_rowmajor()) { vec_set.resize(rows); for(size_t r = 0; r < rows; r++) vec_set[r] = dvec_t<val_type>(cols, &buf[r*cols]); } else { vec_set.resize(cols); for(size_t c = 0; c < cols; c++) vec_set[c] = dvec_t<val_type>(rows, &buf[c*rows]); } } // }}} void inv_major() { // {{{ if(rows == 1 || cols == 1) { major_type = is_rowmajor()? 
COLMAJOR: ROWMAJOR; init_vec_set(); } else if(rows == cols && !is_view()) { // inplace for square matrix for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < r; c++) std::swap(at(r,c),at(c,r)); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; } else { dmat_t tmp(*this); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; resize(rows,cols); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp.at(r,c); } } // }}} public: // Default Constructor dmat_t() {zero_init();} // Copy Constructor dmat_t(const dmat_t& other, major_t major_type_=default_major) { // {{{ zero_init(); if(other.major_type == major_type_) *this = other; else { // deep copy is required when major_type changes major_type = major_type_; resize(other.rows, other.cols); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = other.at(r,c); } } // }}} // Copy Assignment dmat_t& operator=(const dmat_t& other) { // {{{ if(this == &other) return *this; if(other.is_view()) { // for view if(mem_alloc_by_me) clear_space(); rows = other.rows; cols = other.cols; buf = other.buf; major_type = other.major_type; init_vec_set(); mem_alloc_by_me = false; } else { // deep copy if(is_view() || rows!=other.rows || cols!=other.cols || major_type!=other.major_type) { major_type = other.major_type; resize(other.rows, other.cols); } memcpy(buf, other.buf, sizeof(val_type)*rows*cols); } return *this; } // }}} // View Constructor: allocate space if buf_ == NULL explicit dmat_t(size_t rows_, size_t cols_, major_t major_type=default_major): gmat_t<val_type>(rows_,cols_), buf(NULL), mem_alloc_by_me(false), major_type(major_type) { // {{{ resize(rows,cols); memset(this->buf, 0, sizeof(val_type)*rows*cols); } // }}} explicit dmat_t(size_t rows_, size_t cols_, val_type *buf, major_t major_type_): gmat_t<val_type>(rows_,cols_), buf(buf), mem_alloc_by_me(false), major_type(major_type_) { // {{{ init_vec_set(); } // }}} // Fill Constructor explicit dmat_t(size_t nr_copy, const 
dvec_t<val_type>& v, major_t major_type_=default_major) { // {{{ zero_init(); major_type = major_type_; resize(nr_copy, v); } // }}} // dense_vector Converter dmat_t(const dvec_t<val_type>& v, major_t major_type_=default_major) { // {{{ zero_init(); major_type = major_type_; if(!v.is_view()) resize(1, v); else { rows = is_rowmajor()? 1: v.size(); cols = is_colmajor()? 1: v.size(); buf = v.buf; init_vec_set(); } } // }}} template<typename T> dmat_t(const smat_t<T>& sm, major_t major_type_=default_major) { // {{{ zero_init(); major_type = major_type_; resize(sm.rows, sm.cols); memset(buf, 0, sizeof(val_type)*rows*cols); for(size_t i = 0; i < sm.rows; i++) for(size_t idx = sm.row_ptr[i]; idx != sm.row_ptr[i+1]; idx++) at(i, sm.col_idx[idx]) = sm.val_t[idx]; } // }}} template<typename T> dmat_t(const eye_t<T>& eye, major_t major_type_=default_major) { // {{{ zero_init(); major_type = major_type_; resize(eye.rows, eye.cols); memset(buf, 0, sizeof(val_type)*rows*cols); for(size_t i = 0; i < rows; i++) at(i,i) = 1; } // }}} #if defined(CPP11) // Move Constructor dmat_t(dmat_t&& m){ zero_init(); *this = std::move(m); } // Move Assignment dmat_t& operator=(dmat_t&& other) { // {{{ if(this == &other) return *this; clear_space(); rows = other.rows; cols = other.cols; buf = other.buf; vec_set = std::move(other.vec_set); mem_alloc_by_me = other.mem_alloc_by_me; major_type = other.major_type; other.zero_init(); return *this; } // }}} #endif ~dmat_t() {if(mem_alloc_by_me) {for(size_t i = 0; i < rows*cols; i++) buf[i]=-1;}clear_space();} bool is_view() const {return mem_alloc_by_me==false;} bool is_dense() const {return true;} bool is_rowmajor() const {return major_type==ROWMAJOR;} bool is_colmajor() const {return major_type==COLMAJOR;} void clear_space() {if(mem_alloc_by_me) free(buf); zero_init();} dmat_t get_view() const {return dmat_t(rows,cols,buf,major_type);} dmat_t& grow_body() { // {{{ if(is_view()) { dmat_t tmp_view = *this; this->resize(rows,cols); memcpy(buf, 
tmp_view.buf, sizeof(val_type)*rows*cols); } return *this; } // }}} dmat_t transpose() const { // {{{ dmat_t ret = get_view(); ret.to_transpose(); return ret; } // }}} // In-place functions dmat_t& assign(const dmat_t& other) { // {{{ return assign((val_type)1.0, other); } // }}} template<typename T> dmat_t& assign(T a, const dmat_t& other) { // {{{ if(a == T(0)) memset(buf, 0, sizeof(val_type)*rows*cols); else if(a == T(1)) { if(this == &other) return *this; if(is_rowmajor()) { #pragma omp parallel for schedule(static) for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = other.at(r,c); } else { #pragma omp parallel for schedule (static) for(size_t c = 0; c < cols; c++) for(size_t r = 0; r < rows; r++) at(r,c) = other.at(r,c); } } else { if(is_rowmajor()) { #pragma omp parallel for schedule(static) for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = a*other.at(r,c); } else { #pragma omp parallel for schedule(static) for(size_t c = 0; c < cols; c++) for(size_t r = 0; r < rows; r++) at(r,c) = a*other.at(r,c); } } return *this; } // }}} dmat_t& to_transpose() { // {{{ std::swap(rows,cols); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; init_vec_set(); return *this; } // }}} dmat_t& to_rowmajor() {if(is_colmajor()) inv_major(); return *this;} dmat_t& to_colmajor() {if(is_rowmajor()) inv_major(); return *this;} dmat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { // {{{ return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0] : NULL); } // }}} dmat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) { // {{{ dmat_t tmp(*this); resize(rows,cols); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp.at(row_perm? row_perm[r]: r, col_perm? 
col_perm[c]: c); return *this; } // }}} // IO methods void load_from_binary(const char *filename, major_t major_type_=default_major) { // {{{ FILE *fp = fopen(filename, "rb"); if(fp == NULL) { fprintf(stderr, "Error: can't read the file (%s)!!\n", filename); return; } load_from_binary(fp, major_type_, filename); fclose(fp); } // }}} void load_from_binary(FILE *fp, major_t major_type_=default_major, const char *filename=NULL) { // {{{ clear_space(); zero_init(); size_t rows_, cols_; if(fread(&rows_, sizeof(size_t), 1, fp) != 1) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); if(fread(&cols_, sizeof(size_t), 1, fp) != 1) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); std::vector<double> tmp(rows_*cols_); if(fread(&tmp[0], sizeof(double), rows_*cols_, fp) != rows_*cols_) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); dmat_t<double> tmp_view(rows_, cols_, &tmp[0], ROWMAJOR); major_type = major_type_; resize(rows_, cols_); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp_view.at(r,c); /* major_type = major_type_; if(major_type_ == ROWMAJOR) { resize(rows_, cols_); for(size_t idx=0; idx <rows*cols; idx++) buf[idx] = (val_type)tmp[idx]; } else { dmat_t tmp_view(rows, cols, &buf[0], ROWMAJOR); *this = dmat_t(tmp_view, major_type_); } */ } // }}} void save_binary_to_file(const char *filename) { // {{{ FILE *fp = fopen(filename, "wb"); if(fp == NULL) { fprintf(stderr,"Error: can't open file %s\n", filename); exit(1); } save_binary_to_file(fp); fclose(fp); } // }}} void save_binary_to_file(FILE *fp) { // {{{ fwrite(&rows, sizeof(size_t), 1, fp); fwrite(&cols, sizeof(size_t), 1, fp); std::vector<double> tmp(rows*cols); size_t idx = 0; for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) tmp[idx++] = (double)at(r,c); fwrite(&tmp[0], sizeof(double), tmp.size(), fp); } // }}} size_t size() const {return rows;} void resize(size_t nr_copy, const vec_t &v) { // {{{ if(is_rowmajor()) { 
size_t rows_ = nr_copy, cols_ = v.size(); resize(rows_, cols_); size_t unit = sizeof(val_type)*v.size(); for(size_t r = 0; r < rows; r++) memcpy(vec_set[r].data(),v.data(),unit); } else { size_t rows_ = v.size(), cols_ = nr_copy; resize(rows_, cols_); size_t unit = sizeof(val_type)*v.size(); for(size_t c = 0; c < cols; c++) memcpy(vec_set[c].data(),v.data(),unit); } } // }}} void resize(size_t rows_, size_t cols_) { // {{{ if(mem_alloc_by_me) { if(rows_*cols_ != rows*cols) buf = (val_type*) realloc(buf, sizeof(val_type)*rows_*cols_); } else { buf = (val_type*) malloc(sizeof(val_type)*rows_*cols_); } mem_alloc_by_me = true; rows = rows_; cols = cols_; init_vec_set(); } // }}} dmat_t& lazy_resize(size_t rows_, size_t cols_, major_t major_type_=0) { // {{{ if(is_view() && rows_*cols_==rows*cols && (major_type_ == 0 || major_type==major_type_)) reshape(rows_,cols_); else { if(major_type_!=0) major_type = major_type_; resize(rows_, cols_); } return *this; } // }}} dmat_t& reshape(size_t rows_, size_t cols_) { // {{{ assert(rows_*cols_ == rows*cols); if(rows_ != rows || cols != cols) { rows = rows_; cols = cols_; init_vec_set(); } return *this; } // }}} inline val_type& at(size_t r, size_t c) {return is_rowmajor()? buf[r*cols+c] : buf[c*rows+r];} inline const val_type& at(size_t r, size_t c) const {return is_rowmajor()? 
buf[r*cols+c] : buf[c*rows+r];} vec_t& operator[](size_t idx) {return vec_set[idx];} const vec_t& operator[](size_t idx) const {return vec_set[idx];} val_type* data() {return buf;} const val_type* data() const {return buf;} void print_mat(const char *str="", FILE *fp=stdout) const { // {{{ fprintf(fp, "===>%s<===\n", str); fprintf(fp, "rows %ld cols %ld mem_alloc_by_me %d row_major %d buf %p\n", rows, cols, mem_alloc_by_me, is_rowmajor(), buf); for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) fprintf(fp, "%g ", at(r,c)); fprintf(fp, "\n"); } } // }}} }; // }}} // Identity Matrix template<typename val_type> class eye_t : public gmat_t<val_type> { // {{{ public: // size_t rows, cols; inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; eye_t (size_t rows_ = 0): gmat_t<val_type>(rows_,rows_){} bool is_identity() const {return true;} }; // }}} // Sparse matrix format CSC & CSR template<typename val_type> class smat_t : public gmat_t<val_type> { // {{{ private: bool mem_alloc_by_me; bool read_from_binary; unsigned char* binary_buf; size_t binary_buf_len; const static int HeaderSize = sizeof(size_t)+sizeof(size_t)+sizeof(size_t)+sizeof(size_t); void zero_init(); void allocate_space(size_t rows_, size_t cols_, size_t nnz_); void csr_to_csc(); void csc_to_csr(); void update_max_nnz(); public: // static methods static smat_t rand(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double lower=0.0, double upper=1.0) { // {{{ if(lower > upper) lower = upper; smat_t ret; size_t nnz_ = (size_t)(m*n*sparsity); ret.allocate_space(m, n, nnz_); for(size_t idx = 0; idx < nnz_; idx++) { ret.val_t[idx] = rng.uniform(lower, upper); ret.col_idx[idx] = rng.randint(0, n-1); ret.row_ptr[rng.randint(1, m)] += 1; } for(size_t i = 1; i <= m; i++) ret.row_ptr[i] += ret.row_ptr[i-1]; ret.csr_to_csc(); ret.update_max_nnz(); return ret; } // }}} static smat_t randn(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double mean=0.0, double 
std=1.0) { // {{{ smat_t ret; size_t nnz_ = (size_t)(m*n*sparsity); ret.allocate_space(m, n, nnz_); for(size_t idx = 0; idx < nnz_; idx++) { ret.val_t[idx] = (val_type)rng.normal(mean, std); ret.col_idx[idx] = rng.randint(0, n-1); ret.row_ptr[rng.randint(1,m)] += 1; } for(size_t i = 1; i <= m; i++) ret.row_ptr[i] += ret.row_ptr[i-1]; ret.csr_to_csc(); ret.update_max_nnz(); return ret; } // }}} public: //size_t rows, cols; // inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; size_t nnz, max_row_nnz, max_col_nnz; val_type *val, *val_t; size_t *col_ptr, *row_ptr; unsigned *row_idx, *col_idx; // filetypes for loading smat_t enum format_t {TXT=0, PETSc=1, BINARY=2, COMPRESSION=3}; // Default Constructor smat_t() {zero_init();} // Copy Constructor smat_t(const smat_t& m) {zero_init(); *this = m;} smat_t(const dmat_t<val_type>& m) { // {{{ zero_init(); dmat_iterator_t<val_type> entry_it(m); load_from_iterator(m.rows, m.cols, entry_it.get_nnz(), &entry_it); } //}}} smat_t(const eye_t<val_type>& eye) { // {{{ zero_init(); allocate_space(eye.rows, eye.rows, 0); for(size_t i = 0; i < eye.rows; i++) { row_ptr[i+1] = i+1; col_idx[i] = i; val_t[i] = (val_type)1; } for(size_t j = 0; j < eye.cols; j++) { col_ptr[j+1] = j+1; row_idx[j] = j; val[j] = (val_type)1; } } // }}} smat_t(size_t rows_, size_t cols_, size_t nnz_=0){ // {{{ zero_init(); allocate_space(rows_, cols_, nnz_); } // }}} // Copy Assignment smat_t& operator=(const smat_t& other) { // {{{ if(this == &other) return *this; if(mem_alloc_by_me) clear_space(); if(other.is_view()) // for view memcpy(this, &other, sizeof(smat_t)); else { // deep copy *this = other.get_view(); grow_body(); } return *this; } // }}} #if defined(CPP11) // Move Constructor smat_t(smat_t&& m){zero_init(); *this = std::move(m);} // Move Assignment smat_t& operator=(smat_t&& other) { // {{{ if(this == &other) return *this; clear_space(); memcpy(this, &other, sizeof(smat_t)); other.zero_init(); return *this; } // }}} 
#endif // Destructor ~smat_t(){ clear_space();} bool is_view() const {return mem_alloc_by_me==false;} bool is_sparse() const {return true;} void clear_space(); smat_t get_view() const; smat_t& grow_body(); smat_t transpose() const; // return a transpose view //const smat_t transpose() const; // return a transpose view // In-place functions smat_t& to_transpose(); // return a transpose view smat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm); smat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL); smat_subset_iterator_t<val_type> row_subset_it(const std::vector<unsigned> &subset) const; smat_subset_iterator_t<val_type> row_subset_it(const unsigned *subset, int subset_size) const; smat_subset_iterator_t<val_type> col_subset_it(const std::vector<unsigned> &subset) const; smat_subset_iterator_t<val_type> col_subset_it(const unsigned *subset, int subset_size) const; smat_t row_subset(const std::vector<unsigned> &subset) const; smat_t row_subset(const unsigned *subset, int subset_size) const; size_t nnz_of_row(unsigned i) const {return (row_ptr[i+1]-row_ptr[i]);} size_t nnz_of_col(unsigned i) const {return (col_ptr[i+1]-col_ptr[i]);} // smat-vector multiplication val_type* Xv(const val_type* v, val_type* Xv) const; dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv) const; val_type* XTu(const val_type* u, val_type* XTu) const; dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu) const; // IO methods void load_from_iterator(size_t _rows, size_t _cols, size_t _nnz, entry_iterator_t<val_type>* entry_it); void load(size_t _rows, size_t _cols, size_t _nnz, const char *filename, format_t fmt); void load_from_PETSc(const char *filename); void load_from_PETSc(FILE *fp, const char *filename=NULL); void save_PETSc_to_file(const char *filename) const; void save_PETSc_to_file(FILE *fp) const; void load_from_binary(const char *filename); void 
save_binary_to_file(const char *filename) const ; // used for MPI verions void from_mpi(){ // {{{ mem_alloc_by_me = true; max_col_nnz = 0; for(size_t c = 0; c < cols; c++) max_col_nnz = std::max(max_col_nnz, nnz_of_col(c)); } // }}} val_type get_global_mean() const; void remove_bias(val_type bias=0); void print_mat(const char *str="", FILE *fp=stdout) const { // {{{ fprintf(fp, "===>%s<===\n", str); fprintf(fp, "rows,cols,nnz = %lu, %lu, %lu\n", rows, cols, nnz); fprintf(fp, "col_ptr, row_idx, val = %p, %p, %p\n", col_ptr, row_idx, val); fprintf(fp, "row_ptr, col_idx, val_t = %p, %p, %p\n", row_ptr, col_idx, val_t); fprintf(fp, "mem_alloc_by_me = %d\n", mem_alloc_by_me); fprintf(fp, "read_from_binary = %d\n", read_from_binary); } // }}} }; // }}} // Lapack and Blas support {{{ #ifdef _WIN32 #define ddot_ ddot #define sdot_ sdot #define daxpy_ daxpy #define saxpy_ saxpy #define dcopy_ dcopy #define scopy_ scopy #define dgemm_ dgemm #define sgemm_ sgemm #define dposv_ dposv #define sposv_ sposv #define dgesdd_ dgesdd #define sgesdd_ sgesdd #endif extern "C" { double ddot_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *); float sdot_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *); ptrdiff_t dscal_(ptrdiff_t *, double *, double *, ptrdiff_t *); ptrdiff_t sscal_(ptrdiff_t *, float *, float *, ptrdiff_t *); ptrdiff_t daxpy_(ptrdiff_t *, double *, double *, ptrdiff_t *, double *, ptrdiff_t *); ptrdiff_t saxpy_(ptrdiff_t *, float *, float *, ptrdiff_t *, float *, ptrdiff_t *); double dcopy_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *); float scopy_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *); void dgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc); void sgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, 
float *beta, float *c, ptrdiff_t *ldc); int dposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info); int sposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info); void dgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info); void sgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info); } template<typename val_type> val_type dot(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *); template<> inline double dot(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return ddot_(len,x,xinc,y,yinc);} template<> inline float dot(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return sdot_(len,x,xinc,y,yinc);} template<typename val_type> val_type scal(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *); template<> inline double scal(ptrdiff_t *len, double *a, double *x, ptrdiff_t *xinc) { return dscal_(len,a,x,xinc);} template<> inline float scal(ptrdiff_t *len, float *a, float *x, ptrdiff_t *xinc) { return sscal_(len,a,x,xinc);} template<typename val_type> ptrdiff_t axpy(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *); template<> inline ptrdiff_t axpy(ptrdiff_t *len, double *alpha, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return daxpy_(len,alpha,x,xinc,y,yinc);}; template<> inline ptrdiff_t axpy(ptrdiff_t *len, float *alpha, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return saxpy_(len,alpha,x,xinc,y,yinc);}; template<typename val_type> val_type copy(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *); template<> inline double 
copy(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return dcopy_(len,x,xinc,y,yinc);} template<> inline float copy(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return scopy_(len,x,xinc,y,yinc);} template<typename val_type> void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, val_type *alpha, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, val_type *beta, val_type *c, ptrdiff_t *ldc); template<> inline void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc) { dgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } template<> inline void gemm<float>(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc) { sgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } template<typename val_type> int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, ptrdiff_t *info); template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info) { return dposv_(uplo, n, nrhs, a, lda, b, ldb, info); } template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info) { return sposv_(uplo, n, nrhs, a, lda, b, ldb, info); } template<typename val_type> void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, val_type* a, ptrdiff_t* lda, val_type* s, val_type* u, ptrdiff_t* ldu, val_type* vt, ptrdiff_t* ldvt, val_type* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info); template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, 
ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return dgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); } template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return sgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); } // }}} // <x,y> template<typename val_type> val_type do_dot_product(const val_type *x, const val_type *y, size_t size) { // {{{ val_type *xx = const_cast<val_type*>(x); val_type *yy = const_cast<val_type*>(y); ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; return dot(&len, xx, &inc, yy, &inc); } // }}} template<typename val_type> val_type do_dot_product(const dvec_t<val_type> &x, const dvec_t<val_type> &y) { // {{{ assert(x.size() == y.size()); return do_dot_product(x.data(), y.data(), x.size()); } // }}} template<typename val_type> val_type do_dot_product(const dmat_t<val_type> &x, const dmat_t<val_type> &y) { // {{{ assert(x.rows == y.rows && x.cols == y.cols); if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor())) return do_dot_product(x.data(), y.data(), x.rows*x.cols); else { val_type ret = 0.0; const dmat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose(); const dmat_t<val_type> &yy = (y.rows > y.cols) ? 
y : y.transpose(); #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t i = 0; i < xx.rows; i++) { double ret_local = 0.0; for(size_t j = 0; j < xx.cols; j++) ret_local += xx.at(i,j)*yy.at(i,j); ret += ret_local; } return (val_type)ret; } } // }}} // y = alpha*x + y template<typename val_type, typename T> void do_axpy(T alpha, const val_type *x, val_type *y, size_t size) { // {{{ if(alpha == 0) return; val_type alpha_ = (val_type)alpha; ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; val_type *xx = const_cast<val_type*>(x); axpy(&len, &alpha_, xx, &inc, y, &inc); } // }}} template<typename val_type, typename T> void do_axpy(T alpha, const dvec_t<val_type> &x, dvec_t<val_type> &y) { // {{{ do_axpy(alpha, x.data(), y.data(), x.size()); } // }}} template<typename val_type, typename T> void do_axpy(T alpha, const dmat_t<val_type> &x, dmat_t<val_type> &y) { // {{{ assert(x.rows == y.rows && x.cols == y.cols); if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor())) do_axpy(alpha, x.data(), y.data(), x.rows*x.cols); else { if(x.rows > x.cols) { #pragma omp parallel for schedule(static) for(size_t i = 0; i < x.rows; i++) for(size_t j = 0; j < x.cols; j++) y.at(i,j) += alpha*x.at(i,j); } else { #pragma omp parallel for schedule(static) for(size_t j = 0; j < x.cols; j++) for(size_t i = 0; i < x.rows; i++) y.at(i,j) += alpha*x.at(i,j); } } } // }}} // x *= alpha template<typename val_type, typename T> void do_scale(T alpha, val_type *x, size_t size) { // {{{ if(alpha == 0.0) { memset(x, 0, sizeof(val_type)*size); } else if (alpha == 1.0) { return; } else { val_type alpha_minus_one = (val_type)(alpha-1); do_axpy(alpha_minus_one, x, x, size); } } // }}} template<typename val_type, typename T> val_type do_scale(T alpha, dvec_t<val_type> &x) { // {{{ do_scale(alpha, x.data(), x.size()); } // }}} template<typename val_type, typename T> val_type do_scale(T alpha, dmat_t<val_type> &x) { // {{{ do_scale(alpha, x.data(), x.rows*x.cols); } 
// }}} // y = x template<typename val_type> void do_copy(const val_type *x, val_type *y, size_t size) { // {{{ if(x == y) return; ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; val_type *xx = const_cast<val_type*>(x); copy(&len, xx, &inc, y, &inc); } // }}} // A, B, C are stored in column major! template<typename val_type, typename T1, typename T2> void dmat_x_dmat_colmajor(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) { // {{{ ptrdiff_t mm = (ptrdiff_t)m, nn = (ptrdiff_t)n, kk = (ptrdiff_t)k; ptrdiff_t lda = trans_A? kk:mm, ldb = trans_B? nn:kk, ldc = mm; char transpose = 'T', notranspose = 'N'; char *transa = trans_A? &transpose: &notranspose; char *transb = trans_B? &transpose: &notranspose; val_type alpha_ = (val_type) alpha; val_type beta_ = (val_type) beta; val_type *AA = const_cast<val_type*>(A); val_type *BB = const_cast<val_type*>(B); gemm(transa, transb, &mm, &nn, &kk, &alpha_, AA, &lda, BB, &ldb, &beta_, C, &ldc); } // }}} // C = alpha*A*B + beta*C // C : m * n, k is the dimension of the middle // A, B, C are stored in row major! template<typename val_type, typename T1, typename T2> void dmat_x_dmat(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) { // {{{ dmat_x_dmat_colmajor(alpha, B, trans_B, A, trans_A, beta, C, n, m, k); } //}}} // C = A'*B // C : m*n, k is the dimension of the middle // A, B, C are stored in row major! template<typename val_type> void dmat_trans_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { // {{{ bool trans = true; dmat_x_dmat(val_type(1.0), A, trans, B, !trans, val_type(0.0), C, m, n, k); } // }}} // C=A*B // A, B, C are stored in row major! 
template<typename val_type> void dmat_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { // {{{ bool trans = true; dmat_x_dmat(val_type(1.0), A, !trans, B, !trans, val_type(0.0), C, m, n, k); } // }}} // Input: an n*k row-major matrix H // Output: an k*k matrix H^TH template<typename val_type> void doHTH(const val_type *H, val_type *HTH, size_t n, size_t k) { // {{{ bool transpose = true; dmat_x_dmat_colmajor(val_type(1.0), H, !transpose, H, transpose, val_type(0.0), HTH, k, k, n); } // }}} // Solve Ax = b, A is symmetric positive definite, b is overwritten with the result x // A will be modifed by internal Lapack. Make copy when necessary template<typename val_type> bool ls_solve_chol(val_type *A, val_type *b, size_t n) { // {{{ ptrdiff_t nn=n, lda=n, ldb=n, nrhs=1, info=0; char uplo = 'U'; posv(&uplo, &nn, &nrhs, A, &lda, b, &ldb, &info); return (info == 0); } // }}} // Solve AX = B, A is symmetric positive definite, B is overwritten with the result X // A is a m-by-m matrix, while B is a m-by-n matrix stored in col_major // A will be modifed by internal Lapack. Make copy when necessary template<typename val_type> bool ls_solve_chol_matrix_colmajor(val_type *A, val_type *B, size_t m, size_t n = size_t(0)) { // {{{ ptrdiff_t mm=m, lda=m, ldb=m, nrhs=n, info=0; char uplo = 'U'; posv(&uplo, &mm, &nrhs, A, &lda, B, &ldb, &info); return (info == 0); } // }}} // Functions for dmat_t type // C = alpha*A*B + beta*C // C : m * n, k is the dimension of the middle template<typename val_type, typename T1, typename T2> dmat_t<val_type>& dmat_x_dmat(T1 alpha, const dmat_t<val_type>& A, const dmat_t<val_type>& B, T2 beta, dmat_t<val_type>& C) { // {{{ assert(A.cols == B.rows); dmat_t<val_type> AA = A.get_view(), BB = B.get_view(); C.lazy_resize(AA.rows, BB.cols); if (C.is_rowmajor()) { bool trans_A = A.is_rowmajor()? false : true; bool trans_B = B.is_rowmajor()? 
false : true; dmat_x_dmat(alpha, AA.data(), trans_A, BB.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols); } else { bool trans_A = A.is_colmajor()? false : true; bool trans_B = B.is_colmajor()? false : true; dmat_x_dmat_colmajor(alpha, AA.data(), trans_A, BB.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols); } return C; } // }}} // C=A*B template<typename val_type> dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type>& A, const dmat_t<val_type>& B, dmat_t<val_type>& C) { // {{{ return dmat_x_dmat(val_type(1.0), A, B, val_type(0.0), C); } // }}} template<typename val_type> dmat_t<val_type> operator*(const dmat_t<val_type>& A, const dmat_t<val_type>& B) { // {{{ dmat_t<val_type> C(A.rows,B.cols); dmat_x_dmat(A,B,C); return C; } // }}} // Solve AX = B, A is symmetric positive definite, return X template<typename val_type> dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B) { // {{{ dmat_t<val_type> X(B, COLMAJOR); X.grow_body(); dmat_t<val_type> AA(A); AA.grow_body(); if(ls_solve_chol_matrix_colmajor(AA.data(), X.data(), AA.rows, X.cols) == false) fprintf(stderr, "error to apply ls_solve_cho_matrix_colmajor"); return X; } // }}} // SVD [U S V] = SVD(A), template<typename val_type> class svd_solver_t { // {{{ private: char jobz; ptrdiff_t mm, nn, min_mn, max_mn, lda, ldu, ldvt, lwork1, lwork2, lwork, info; std::vector<val_type> u_buf, v_buf, s_buf, work; std::vector<ptrdiff_t> iwork; size_t k; void prepare_parameter(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced) { // {{{ k = std::min(A.rows, A.cols); mm = (ptrdiff_t)A.rows; nn = (ptrdiff_t)A.cols; min_mn = std::min(mm,nn); max_mn = std::max(mm,nn); lda = mm; ldu = mm; ldvt = reduced? 
min_mn : nn; lwork1 = 3*min_mn*min_mn + std::max(max_mn, 4*min_mn*min_mn + 4*min_mn); lwork2 = 3*min_mn + std::max(max_mn, 4*min_mn*min_mn + 3*min_mn + max_mn); lwork = 2 * std::max(lwork1, lwork2); // due to differences between lapack 3.1 and 3.4 info = 0; work.resize(lwork); iwork.resize((size_t)(8*min_mn)); if(!S.is_view() || S.size() != k) S.resize(k); if(reduced) { jobz = 'S'; U.lazy_resize(A.rows, k, COLMAJOR); V.lazy_resize(A.cols, k, ROWMAJOR); } else { jobz = 'A'; U.lazy_resize(A.rows, A.rows, COLMAJOR); V.lazy_resize(A.cols, A.cols, ROWMAJOR); } } // }}} public: svd_solver_t() {} bool solve(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true) { // {{{ if(A.is_rowmajor()) return solve(A.transpose(), V, S, U, reduced); else { dmat_t<val_type> AA(A.get_view()); prepare_parameter(AA, U, S, V, reduced); #if defined(CPP11) gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, work.data(), &lwork, iwork.data(), &info); #else gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, &work[0], &lwork, &iwork[0], &info); #endif return (info == 0); } } // }}} }; // }}} template<typename val_type> void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true) { // {{{ svd_solver_t<val_type> solver; solver.solve(A, U, S, V, reduced); } // }}} template<typename val_type> smat_t<val_type> sprand(size_t m, size_t n, double sparsity) { // {{{ static rng_t rng; return smat_t<val_type>::rand(rng, m, n, sparsity); } // }}} template<typename val_type> smat_t<val_type> sprandn(size_t m, size_t n, double sparsity) { // {{{{ static rng_t rng; return smat_t<val_type>::randn(rng, m, n, sparsity); } // }}} template<typename val_type> dmat_t<val_type> drand(size_t m, size_t n, major_t major_type_=default_major) { // {{{ static rng_t rng; return dmat_t<val_type>::rand(rng, m, n, 0.0, 1.0, major_type_ ); } // }}} 
template<typename val_type> dmat_t<val_type> drandn(size_t m, size_t n, major_t major_type_=default_major) { // {{{{ static rng_t rng; return dmat_t<val_type>::randn(rng, m, n, 0.0, 1.0, major_type_); } // }}} /*-------------- Iterators -------------------*/ template<typename val_type> class entry_t{ // {{{ public: unsigned i, j; val_type v, weight; entry_t(int ii=0, int jj=0, val_type vv=0, val_type ww=1.0): i(ii), j(jj), v(vv), weight(ww){} }; // }}} template<typename val_type> class entry_iterator_t { // {{{ public: size_t nnz; virtual entry_t<val_type> next() = 0; }; // }}} #define MAXLINE 10240 // Iterator for files with (i,j,v) tuples template<typename val_type> class file_iterator_t: public entry_iterator_t<val_type> { // {{{ public: file_iterator_t(size_t nnz_, const char* filename, size_t start_pos=0); ~file_iterator_t(){ if (fp) fclose(fp); } entry_t<val_type> next(); private: size_t nnz; FILE *fp; char line[MAXLINE]; }; // }}} // smat_t iterator template<typename val_type> class smat_iterator_t: public entry_iterator_t<val_type> { // {{{ public: //enum {ROWMAJOR, COLMAJOR}; // major: smat_iterator_t<val_type>::ROWMAJOR or smat_iterator_t<val_type>::COLMAJOR smat_iterator_t(const smat_t<val_type>& M, major_t major = ROWMAJOR); ~smat_iterator_t() {} entry_t<val_type> next(); private: size_t nnz; unsigned *col_idx; size_t *row_ptr; val_type *val_t; size_t rows, cols, cur_idx; size_t cur_row; }; // }}} // smat_t subset iterator template<typename val_type> class smat_subset_iterator_t: public entry_iterator_t<val_type> { // {{{ public: //enum {ROWMAJOR, COLMAJOR}; // major: smat_iterator_t<val_type>::ROWMAJOR or smat_iterator_t<val_type>::COLMAJOR smat_subset_iterator_t(const smat_t<val_type>& M, const unsigned *subset, size_t size, bool remapping=false, major_t major = ROWMAJOR); ~smat_subset_iterator_t() {} size_t get_nnz() {return nnz;} size_t get_rows() {return major==ROWMAJOR? remapping? 
subset.size(): rows: rows;} size_t get_cols() {return major==ROWMAJOR? cols: remapping? subset.size():cols;} entry_t<val_type> next(); private: size_t nnz; unsigned *col_idx; size_t *row_ptr; val_type *val_t; size_t rows, cols, cur_idx; size_t cur_row; std::vector<unsigned>subset; major_t major; bool remapping; }; // }}} // dmat_t iterator template<typename val_type> class dmat_iterator_t: public entry_iterator_t<val_type> { // {{{ public: dmat_iterator_t(const dmat_t<val_type>& M, double threshold=1e-12) : M(M), nnz(M.rows*M.cols), rows(M.rows), cols(M.cols), threshold(fabs(threshold)) { // {{{ cur_row = 0; cur_col = 0; nnz = 0; bool find_firstnz = true; for(size_t i = 0; i < rows; i++) for(size_t j = 0; j < cols; j++) if(fabs((double)M.at(i,j)) >= threshold) { if(find_firstnz) { cur_row = i; cur_col = j; find_firstnz = false; } nnz++ ; } // printf("cur_row %ld cur_col %ld nnz %ld\n", cur_row, cur_col, nnz); } // }}} ~dmat_iterator_t() {} entry_t<val_type> next() { // {{{ entry_t<val_type> entry(cur_row, cur_col, M.at(cur_row, cur_col)); do { cur_col += 1; if(cur_col == cols) { cur_row += 1; cur_col = 0; } } while(fabs((double)M.at(cur_row, cur_col)) <= threshold ); return entry; } // }}} size_t get_nnz() const {return nnz;} private: size_t nnz; const dmat_t<val_type>& M; size_t rows, cols, cur_row, cur_col; double threshold; }; // }}} // -------------- Implementation -------------- template<typename val_type> inline void smat_t<val_type>::zero_init() { // {{{ mem_alloc_by_me = false; read_from_binary = false; val=val_t=NULL; col_ptr=row_ptr=NULL; row_idx=col_idx=NULL; rows=cols=nnz=max_col_nnz=max_row_nnz=0; } // }}} template<typename val_type> void smat_t<val_type>::allocate_space(size_t rows_, size_t cols_, size_t nnz_) { // {{{ if(mem_alloc_by_me) clear_space(); rows = rows_; cols = cols_; nnz = nnz_; val = MALLOC(val_type, nnz); val_t = MALLOC(val_type, nnz); row_idx = MALLOC(unsigned, nnz); col_idx = MALLOC(unsigned, nnz); row_ptr = MALLOC(size_t, rows+1); 
col_ptr = MALLOC(size_t, cols+1); memset(row_ptr,0,sizeof(size_t)*(rows+1)); memset(col_ptr,0,sizeof(size_t)*(cols+1)); mem_alloc_by_me = true; } // }}} template<typename val_type> void smat_t<val_type>::clear_space() { // {{{ if(mem_alloc_by_me) { if(read_from_binary) free(binary_buf); else { if(val)free(val); if(val_t)free(val_t); if(row_ptr)free(row_ptr);if(row_idx)free(row_idx); if(col_ptr)free(col_ptr);if(col_idx)free(col_idx); } } zero_init(); } // }}} template<typename val_type> smat_t<val_type> smat_t<val_type>::get_view() const { // {{{ if(is_view()) return *this; else { smat_t tmp; memcpy(&tmp, this, sizeof(smat_t)); tmp.mem_alloc_by_me = false; return tmp; } } // }}} template<typename val_type> smat_t<val_type>& smat_t<val_type>::grow_body() { // {{{ if(is_view()) { smat_t tmp = *this; // a copy of the view col_ptr = MALLOC(size_t, cols+1); memcpy(col_ptr, tmp.col_ptr, sizeof(size_t)*cols+1); row_idx = MALLOC(unsigned, nnz); memcpy(row_idx, tmp.row_idx, sizeof(unsigned)*nnz); val = MALLOC(val_type, nnz); memcpy(val, tmp.val, sizeof(val_type)*nnz); row_ptr = MALLOC(size_t, rows+1); memcpy(row_ptr, tmp.row_ptr, sizeof(size_t)*rows+1); col_idx = MALLOC(unsigned, nnz); memcpy(col_idx, tmp.col_idx, sizeof(unsigned)*nnz); val_t = MALLOC(val_type, nnz); memcpy(val_t, tmp.val_t, sizeof(val_type)*nnz); mem_alloc_by_me = true; } return *this; } // }}} template<typename val_type> smat_t<val_type> smat_t<val_type>::transpose() const{ // {{{ smat_t<val_type> mt = get_view().to_transpose(); /* mt.cols = rows; mt.rows = cols; mt.nnz = nnz; mt.val = val_t; mt.val_t = val; mt.col_ptr = row_ptr; mt.row_ptr = col_ptr; mt.col_idx = row_idx; mt.row_idx = col_idx; mt.max_col_nnz=max_row_nnz; mt.max_row_nnz=max_col_nnz; */ return mt; } // }}} template<typename val_type> smat_t<val_type>& smat_t<val_type>::to_transpose() { // {{{ std::swap(rows,cols); std::swap(val,val_t); std::swap(row_ptr,col_ptr); std::swap(row_idx,col_idx); std::swap(max_col_nnz, max_row_nnz); return *this; 
} // }}} template<typename val_type> smat_t<val_type>& smat_t<val_type>::apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { // {{{ apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0]: NULL); } // }}} template<typename val_type> smat_t<val_type>& smat_t<val_type>::apply_permutation(const unsigned *row_perm, const unsigned *col_perm) { // {{{ if(row_perm!=NULL) { for(size_t idx = 0; idx < nnz; idx++) row_idx[idx] = row_perm[row_idx[idx]]; csc_to_csr(); csr_to_csc(); } if(col_perm!=NULL) { for(size_t idx = 0; idx < nnz; idx++) col_idx[idx] = col_perm[col_idx[idx]]; csr_to_csc(); csc_to_csr(); } } // }}} template<typename val_type> smat_subset_iterator_t<val_type> smat_t<val_type>::row_subset_it(const std::vector<unsigned> &subset) const { // {{{ return row_subset_it(&subset[0], (int)subset.size()); } // }}} template<typename val_type> smat_subset_iterator_t<val_type> smat_t<val_type>::row_subset_it(const unsigned *subset, int subset_size) const { // {{{ return smat_subset_iterator_t<val_type> (*this, subset, subset_size); } // }}} template<typename val_type> smat_subset_iterator_t<val_type> smat_t<val_type>::col_subset_it(const std::vector<unsigned> &subset) const { // {{{ return col_subset_it(&subset[0], (int)subset.size()); } // }}} template<typename val_type> smat_subset_iterator_t<val_type> smat_t<val_type>::col_subset_it(const unsigned *subset, int subset_size) const { // {{{ bool remmapping = false; // no remapping by default return smat_subset_iterator_t<val_type> (*this, subset, subset_size, remmapping, smat_subset_iterator_t<val_type>::COLMAJOR); } // }}} template<typename val_type> smat_t<val_type> smat_t<val_type>::row_subset(const std::vector<unsigned> &subset) const { // {{{ return row_subset(&subset[0], (int)subset.size()); } // }}} template<typename val_type> smat_t<val_type> smat_t<val_type>::row_subset(const unsigned *subset, int subset_size) const { // {{{ 
smat_subset_iterator_t<val_type> it(*this, subset, subset_size); smat_t<val_type> sub_smat; sub_smat.load_from_iterator(subset_size, cols, it.get_nnz(), &it); return sub_smat; } // }}} template<typename val_type> val_type smat_t<val_type>::get_global_mean() const { // {{{ val_type sum=0; for(size_t idx = 0; idx < nnz; idx++) sum += val[idx]; return sum/(val_type)nnz; } // }}} template<typename val_type> void smat_t<val_type>::remove_bias(val_type bias) { // {{{ if(bias) { for(size_t idx = 0; idx < nnz; idx++) { val[idx] -= bias; val_t[idx] -= bias; } } } // }}} template<typename val_type> val_type* smat_t<val_type>::Xv(const val_type *v, val_type *Xv) const { // {{{ for(size_t i = 0; i < rows; ++i) { Xv[i] = 0; for(size_t idx = row_ptr[i]; idx < row_ptr[i+1]; ++idx) Xv[i] += val_t[idx] * v[col_idx[idx]]; } return Xv; } // }}} template<typename val_type> dvec_t<val_type>& smat_t<val_type>::Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv) const { // {{{ this->Xv(v.data(), Xv.data()); return Xv; } // }}} template<typename val_type> val_type* smat_t<val_type>::XTu(const val_type *u, val_type *XTu) const { // {{{ for(size_t i = 0; i < cols; ++i) { XTu[i] = 0; for(size_t idx = col_ptr[i]; idx < col_ptr[i+1]; ++idx) XTu[i] += val[idx] * u[row_idx[idx]]; } return XTu; } // }}} template<typename val_type> dvec_t<val_type>& smat_t<val_type>::XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu) const { // {{{ this->XTu(u.data(), XTu.data()); return XTu; } // }}} // Comparator for sorting rates into row/column comopression storage template<typename val_type> class SparseComp { // {{{ public: const unsigned *row_idx; const unsigned *col_idx; SparseComp(const unsigned *row_idx_, const unsigned *col_idx_, bool isCSR=true) { row_idx = (isCSR)? row_idx_: col_idx_; col_idx = (isCSR)? 
col_idx_: row_idx_; } bool operator()(size_t x, size_t y) const { return (row_idx[x] < row_idx[y]) || ((row_idx[x] == row_idx[y]) && (col_idx[x]< col_idx[y])); } }; // }}} template<typename val_type> void smat_t<val_type>::load_from_iterator(size_t _rows, size_t _cols, size_t _nnz, entry_iterator_t<val_type> *entry_it) { // {{{ clear_space(); // clear any pre-allocated space in case of memory leak rows =_rows,cols=_cols,nnz=_nnz; allocate_space(rows,cols,nnz); // a trick here to utilize the space the have been allocated std::vector<size_t> perm(_nnz); unsigned *tmp_row_idx = col_idx; unsigned *tmp_col_idx = row_idx; val_type *tmp_val = val; for(size_t idx = 0; idx < _nnz; idx++){ entry_t<val_type> rate = entry_it->next(); row_ptr[rate.i+1]++; col_ptr[rate.j+1]++; tmp_row_idx[idx] = rate.i; tmp_col_idx[idx] = rate.j; tmp_val[idx] = rate.v; perm[idx] = idx; } // sort entries into row-majored ordering sort(perm.begin(), perm.end(), SparseComp<val_type>(tmp_row_idx, tmp_col_idx, true)); // Generate CSR format for(size_t idx = 0; idx < _nnz; idx++) { val_t[idx] = tmp_val[perm[idx]]; col_idx[idx] = tmp_col_idx[perm[idx]]; } // Calculate nnz for each row and col max_row_nnz = max_col_nnz = 0; for(size_t r = 1; r <= rows; r++) { max_row_nnz = std::max(max_row_nnz, row_ptr[r]); row_ptr[r] += row_ptr[r-1]; } for(size_t c = 1; c <= cols; c++) { max_col_nnz = std::max(max_col_nnz, col_ptr[c]); col_ptr[c] += col_ptr[c-1]; } // Transpose CSR into CSC matrix for(size_t r = 0; r < rows; ++r){ for(size_t idx = row_ptr[r]; idx < row_ptr[r+1]; idx++){ size_t c = (size_t) col_idx[idx]; row_idx[col_ptr[c]] = r; val[col_ptr[c]++] = val_t[idx]; } } for(size_t c = cols; c > 0; --c) col_ptr[c] = col_ptr[c-1]; col_ptr[0] = 0; } // }}} template<typename val_type> void smat_t<val_type>::load(size_t _rows, size_t _cols, size_t _nnz, const char* filename, typename smat_t<val_type>::format_t fmt) { // {{{ if(fmt == smat_t<val_type>::TXT) { file_iterator_t<val_type> entry_it(_nnz, filename); 
load_from_iterator(_rows, _cols, _nnz, &entry_it); } else if(fmt == smat_t<val_type>::PETSc) { load_from_PETSc(filename); } else { fprintf(stderr, "Error: filetype %d not supported\n", fmt); return ; } } // }}} template<typename val_type> void smat_t<val_type>::save_PETSc_to_file(const char *filename) const { // {{{ FILE *fp = fopen(filename, "wb"); if(fp == NULL) { fprintf(stderr,"Error: can't open file %s\n", filename); exit(1); } save_PETSc_to_file(fp); } // }}} template<typename val_type> void smat_t<val_type>::save_PETSc_to_file(FILE *fp) const { // {{{ const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; int32_t int_buf[3] = {(int32_t)LONG_FILE, (int32_t)rows, (int32_t)cols}; std::vector<int32_t> nnz_row(rows); for(size_t r = 0; r < rows; r++) nnz_row[r] = (int)nnz_of_row(r); fwrite(&int_buf[0], sizeof(int32_t), 3, fp); fwrite(&nnz, sizeof(size_t), 1, fp); fwrite(&nnz_row[0], sizeof(int32_t), rows, fp); fwrite(&col_idx[0], sizeof(unsigned), nnz, fp); // the following part == fwrite(val_t, sizeof(double), nnz, fp); const size_t chunksize = 1024; double buf[chunksize]; size_t idx = 0; while(idx + chunksize < nnz) { for(size_t i = 0; i < chunksize; i++) buf[i] = (double) val_t[idx+i]; fwrite(&buf[0], sizeof(double), chunksize, fp); idx += chunksize; } size_t remaining = nnz - idx; for(size_t i = 0; i < remaining; i++) buf[i] = (double) val_t[idx+i]; fwrite(&buf[0], sizeof(double), remaining, fp); } // }}} template<typename val_type> void smat_t<val_type>::load_from_PETSc(const char *filename) { // {{{ FILE *fp = fopen(filename, "rb"); if(fp == NULL) { fprintf(stderr, "Error: can't read the file (%s)!!\n", filename); return; } load_from_PETSc(fp, filename); fclose(fp); } // }}} template<typename val_type> void smat_t<val_type>::load_from_PETSc(FILE *fp, const char *filename) { // {{{ clear_space(); // clear any pre-allocated space in case of memory leak const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; int32_t int_buf[3]; size_t headersize = 0; headersize += 
sizeof(int)*fread(int_buf, sizeof(int), 3, fp); int filetype = int_buf[0]; rows = (size_t) int_buf[1]; cols = (size_t) int_buf[2]; if(filetype == UNSIGNED_FILE) { headersize += sizeof(int)*fread(int_buf, sizeof(int32_t), 1, fp); nnz = (size_t) int_buf[0]; } else if (filetype == LONG_FILE){ headersize += sizeof(size_t)*fread(&nnz, sizeof(int64_t), 1, fp); } else { fprintf(stderr, "Error: wrong PETSc format in %s.\n", filename); } allocate_space(rows,cols,nnz); // load CSR from the binary PETSc format { // {{{ // read row_ptr std::vector<int32_t> nnz_row(rows); headersize += sizeof(int32_t)*fread(&nnz_row[0], sizeof(int32_t), rows, fp); row_ptr[0] = 0; for(size_t r = 1; r <= rows; r++) row_ptr[r] = row_ptr[r-1] + nnz_row[r-1]; // read col_idx headersize += sizeof(int)*fread(&col_idx[0], sizeof(unsigned), nnz, fp); // read val_t const size_t chunksize = 1024; double buf[chunksize]; size_t idx = 0; while(idx + chunksize < nnz) { headersize += sizeof(double)*fread(&buf[0], sizeof(double), chunksize, fp); for(size_t i = 0; i < chunksize; i++) val_t[idx+i] = (val_type) buf[i]; idx += chunksize; } size_t remaining = nnz - idx; headersize += sizeof(double)*fread(&buf[0], sizeof(double), remaining, fp); for(size_t i = 0; i < remaining; i++) val_t[idx+i] = (val_type) buf[i]; } // }}} csr_to_csc(); update_max_nnz(); } // }}} template<typename val_type> void smat_t<val_type>::csr_to_csc() { // {{{ memset(col_ptr, 0, sizeof(size_t)*(cols+1)); for(size_t idx = 0; idx < nnz; idx++) col_ptr[col_idx[idx]+1]++; for(size_t c = 1; c <= cols; c++) col_ptr[c] += col_ptr[c-1]; for(size_t r = 0; r < rows; r++) { for(size_t idx = row_ptr[r]; idx != row_ptr[r+1]; idx++) { size_t c = (size_t) col_idx[idx]; row_idx[col_ptr[c]] = r; val[col_ptr[c]++] = val_t[idx]; } } for(size_t c = cols; c > 0; c--) col_ptr[c] = col_ptr[c-1]; col_ptr[0] = 0; } // }}} template<typename val_type> void smat_t<val_type>::csc_to_csr() { // {{{ memset(row_ptr, 0, sizeof(size_t)*(rows+1)); for(size_t idx = 0; idx < 
nnz; idx++) row_ptr[row_idx[idx]+1]++; for(size_t r = 1; r <= rows; r++) row_ptr[r] += row_ptr[r-1]; for(size_t c = 0; c < cols; c++) { for(size_t idx = col_ptr[c]; idx != col_ptr[c+1]; idx++) { size_t r = (size_t) row_idx[idx]; col_idx[row_ptr[r]] = c; val_t[row_ptr[r]++] = val[idx]; } } for(size_t r = rows; r > 0; r--) row_ptr[r] = row_ptr[r-1]; row_ptr[0] = 0; } // }}} template<typename val_type> void smat_t<val_type>::update_max_nnz() { // {{{ max_row_nnz = max_col_nnz = 0; for(size_t c = 0; c < cols; c++) max_col_nnz = std::max(max_col_nnz, nnz_of_col(c)); for(size_t r = 0; r < rows; r++) max_row_nnz = std::max(max_row_nnz, nnz_of_row(r)); } // }}} template<typename val_type> file_iterator_t<val_type>::file_iterator_t(size_t nnz_, const char* filename, size_t start_pos) { // {{{ nnz = nnz_; fp = fopen(filename,"rb"); if(fp == NULL) { fprintf(stderr, "Error: cannot read the file (%s)!!\n", filename); return; } fseek(fp, start_pos, SEEK_SET); } // }}} template<typename val_type> entry_t<val_type> file_iterator_t<val_type>::next() { // {{{ const int base10 = 10; if(nnz > 0) { --nnz; if(fgets(&line[0], MAXLINE, fp)==NULL) fprintf(stderr, "Error: reading error !!\n"); char *head_ptr = &line[0]; size_t i = strtol(head_ptr, &head_ptr, base10); size_t j = strtol(head_ptr, &head_ptr, base10); double v = strtod(head_ptr, &head_ptr); return entry_t<val_type>(i-1, j-1, (val_type)v); } else { fprintf(stderr, "Error: no more entry to iterate !!\n"); return entry_t<val_type>(0,0,0); } } // }}} template<typename val_type> smat_iterator_t<val_type>::smat_iterator_t(const smat_t<val_type>& M, major_t major) { // {{{ nnz = M.nnz; col_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx; row_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr; val_t = (major == ROWMAJOR)? M.val_t: M.val; rows = (major==ROWMAJOR)? M.rows: M.cols; cols = (major==ROWMAJOR)? 
M.cols: M.rows; cur_idx = cur_row = 0; } // }}} template<typename val_type> entry_t<val_type> smat_iterator_t<val_type>::next() { // {{{ while (cur_idx >= row_ptr[cur_row+1]) cur_row++; if (nnz > 0) nnz--; else fprintf(stderr,"Error: no more entry to iterate !!\n"); entry_t<val_type> ret(cur_row, col_idx[cur_idx], val_t[cur_idx]); cur_idx++; return ret; } // }}} template<typename val_type> smat_subset_iterator_t<val_type>::smat_subset_iterator_t(const smat_t<val_type>& M, const unsigned *subset, size_t size, bool remapping_, major_t major_) { // {{{ major = major_; remapping = remapping_; col_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx; row_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr; val_t = (major == ROWMAJOR)? M.val_t: M.val; rows = (major==ROWMAJOR)? (remapping?size:M.rows): (remapping?size:M.cols); cols = (major==ROWMAJOR)? M.cols: M.rows; this->subset.resize(size); nnz = 0; for(size_t i = 0; i < size; i++) { unsigned idx = subset[i]; this->subset[i] = idx; nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx); } sort(this->subset.begin(), this->subset.end()); cur_row = 0; cur_idx = row_ptr[this->subset[cur_row]]; } // }}} template<typename val_type> entry_t<val_type> smat_subset_iterator_t<val_type>::next() { // {{{ while (cur_idx >= row_ptr[subset[cur_row]+1]) { cur_row++; cur_idx = row_ptr[subset[cur_row]]; } if (nnz > 0) nnz--; else fprintf(stderr,"Error: no more entry to iterate !!\n"); //entry_t<val_type> ret(cur_row, col_idx[cur_idx], val_t[cur_idx]); entry_t<val_type> ret_rowwise(remapping?cur_row:subset[cur_row], col_idx[cur_idx], val_t[cur_idx]); entry_t<val_type> ret_colwise(col_idx[cur_idx], remapping?cur_row:subset[cur_row], val_t[cur_idx]); //printf("%d %d\n", cur_row, col_idx[cur_idx]); cur_idx++; //return ret; return major==ROWMAJOR? 
ret_rowwise: ret_colwise; } // }}} /* H = X*W X is an m*n W is an n*k, row-majored array H is an m*k, row-majored array */ template<typename val_type> void smat_x_dmat(const smat_t<val_type> &X, const val_type* W, const size_t k, val_type *H) { // {{{ size_t m = X.rows; #pragma omp parallel for schedule(dynamic,50) shared(X,W,H) for(size_t i = 0; i < m; i++) { val_type *Hi = &H[k*i]; memset(Hi,0,sizeof(val_type)*k); for(size_t idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) { const val_type Xij = X.val_t[idx]; const val_type *Wj = &W[X.col_idx[idx]*k]; for(unsigned t = 0; t < k; t++) Hi[t] += Xij*Wj[t]; } } } // }}} template<typename val_type> void smat_x_dmat(const smat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) { // {{{ assert(W.cols == H.cols && X.cols == W.rows && X.rows == H.rows); assert(W.is_rowmajor() && H.is_rowmajor()); smat_x_dmat(1.0, X, W, 0.0, H, H); //smat_x_dmat(X, W.data(), W.cols, H.data()); } // }}} /* H = a*X*W + b H0 X is an m*n W is an n*k, row-majored array H is an m*k, row-majored array */ template<typename val_type, typename T2, typename T3> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type *W, const size_t k, T3 b, const val_type *H0, val_type *H) { // {{{ size_t m = X.rows; val_type aa = (val_type) a; val_type bb = (val_type) b; if(a == T2(0)) { if(bb == (val_type)0.0){ memset(H, 0, sizeof(val_type)*m*k); return ; } else { if(H!=H0) { do_copy(H0, H, m*k); //memcpy(H, H0, sizeof(val_type)*m*k); } do_scale(bb, H, m*k); } return; } #pragma omp parallel for schedule(dynamic,64) shared(X, W, H, H0, aa,bb) for(size_t i = 0; i < m; i++) { val_type *Hi = &H[k*i]; if(bb == (val_type)0.0) memset(Hi, 0, sizeof(val_type)*k); else { if(Hi!=&H0[k*i]) do_copy(&H0[k*i], Hi, k); do_scale(bb, Hi, k); } for(size_t idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) { const val_type Xij = X.val_t[idx]; const val_type *Wj = &W[X.col_idx[idx]*k]; for(size_t t = 0; t < k; t++) Hi[t] += aa*Xij*Wj[t]; } } }// }}} 
template<typename val_type, typename T2> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type* W, const size_t k, const val_type *H0, val_type *H) { // {{{ smat_x_dmat(a, X, W, k, 1.0, H0, H); } // }}} template<typename val_type, typename T2, typename T3> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) { // {{{ assert(W.cols == H0.cols && W.cols == H.cols && X.cols == W.rows && X.rows == H0.rows && X.rows == H.rows); if(W.is_rowmajor()) { if(H.is_rowmajor()) { if(H0.is_rowmajor()){ smat_x_dmat(a, X, W.data(), W.cols, b, H0.data(), H.data()); } else { H.assign(b, H0); smat_x_dmat(a, X, W.data(), W.cols, 1.0, H.data(), H.data()); } } else { // H is col_major H.assign(b, H0); // H += aXW #pragma omp parallel for schedule(dynamic, 64) shared(X, W, H) for(size_t i = 0; i < X.rows; i++) { for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){ size_t j = X.col_idx[idx]; const val_type &Xij = X.val_t[idx]; for(size_t t = 0; t < W.cols; t++) H.at(i,t) += a*Xij*W.at(j,t); } } } } else { // W.is_colmajor H.assign(b, H0); if(H.is_colmajor()) { #pragma omp parallel for schedule(static) for(size_t j = 0; j < W.cols; j++) X.Xv(W[j], H[j]); } else { // H.is row_major // H += aXW #pragma omp parallel for schedule(dynamic, 64) shared(X, W, H) for(size_t i = 0; i < X.rows; i++) { for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){ size_t j = X.col_idx[idx]; const val_type &Xij = X.val_t[idx]; for(size_t t = 0; t < W.cols; t++) H.at(i,t) += a*Xij*W.at(j,t); } } } } }// }}} template<typename val_type, typename T2> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) { // {{{ smat_x_dmat(a, X, W, 1.0, H0, H); } // }}} /* * H = a*XW + b H0 * X is an m*n gmat * W is an n*k dmat * H is m*k dmat */ template<typename val_type, typename T2, typename T3> void gmat_x_dmat(T2 a, const gmat_t<val_type>& X, const 
dmat_t<val_type>& W, T3 b, const dmat_t<val_type>& H0, dmat_t<val_type>& H) { // {{{ if(X.is_sparse()) smat_x_dmat(a, X.get_sparse(), W, b, H0, H); else if(X.is_dense()) dmat_x_dmat(a, X.get_dense(), W, b, H0, H); else if(X.is_identity()) { H.assign(b, H0); do_axpy(a, W, H); } } // }}} /* * H = XW * */ template<typename val_type> void gmat_x_dmat(const gmat_t<val_type>& X, const dmat_t<val_type>& W, dmat_t<val_type>& H) { // {{{ if(X.is_sparse()) smat_x_dmat(X.get_sparse(), W, H); else if(X.is_dense()) dmat_x_dmat(X.get_dense(), W, H); else if(X.is_identity()) H.assign(W); } // }}} /* trace(W^T X H) X is an m*n, sparse matrix W is an m*k, row-majored array H is an n*k, row-major */ template<typename val_type> val_type trace_dmat_T_smat_dmat(const val_type *W, const smat_t<val_type> &X, const val_type *H, const size_t k) { // {{{ size_t m = X.rows; double ret = 0; #pragma omp parallel for schedule(dynamic,50) shared(X,H,W) reduction(+:ret) for(size_t i = 0; i < m; i++) { const val_type *Wi = &W[k*i]; for(long idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) { const val_type *Hj = &H[X.col_idx[idx]*k]; double tmp=0; for(size_t t = 0; t < k; t++) tmp += Wi[t]*Hj[t]; ret += X.val_t[idx]*tmp; } } return (val_type)ret; } // }}} template<typename val_type> val_type trace_dmat_T_smat_dmat(const dmat_t<val_type> &W, const smat_t<val_type> &X, const dmat_t<val_type> &H) { // {{{ assert(W.cols == H.cols && W.rows == X.rows && H.rows == X.cols); if(W.is_colmajor() && H.is_colmajor()) { double ret = 0; #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t t = 0; t < W.cols; t++) { const dvec_t<val_type> &u = W[t]; const dvec_t<val_type> &v = H[t]; double local_sum = 0; for(size_t i = 0; i < X.rows; i++) { for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++) local_sum += X.val_t[idx]*u[i]*v[X.col_idx[idx]]; } ret += local_sum; } return ret; } else { double ret= 0; #pragma omp parallel for schedule(dynamic,64) reduction(+:ret) for(size_t i = 0; i < X.rows; 
i++) { double local_sum = 0; for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++) { size_t j = X.col_idx[idx]; double sum = 0; for(size_t t = 0; t < W.cols; t++) sum += W.at(i,t)*H.at(j,t); local_sum += sum * X.val_t[idx]; } ret += local_sum; } return ret; } } // }}} /* trace(W^T diag(D) H) D is an m*1 vector W is an m*k, row-majored array H is an m*k, row-major array */ template<typename val_type> val_type trace_dmat_T_diag_dmat(const val_type *W, const val_type *D, const val_type *H, const size_t m, const size_t k) { // {{{ val_type *w = const_cast<val_type*>(W); val_type *h = const_cast<val_type*>(H); val_type *d = const_cast<val_type*>(D); double ret = 0.0; #pragma omp parallel for schedule(static) shared(w,h,d) reduction(+:ret) for(size_t i = 0; i < m; i++) { val_type *wi = &w[i*k], *hi = &h[i*k]; ret += do_dot_product(wi, wi, k) * d[i]; } return (val_type)ret; } // }}} template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dvec_t<val_type> &D, const dmat_t<val_type> &H) { // {{{ assert(W.rows == H.rows && W.rows == D.len && W.cols == H.cols); assert(W.is_rowmajor() && H.is_rowmajor()); return trace_dmat_T_diag_dmat(W.data(),D.data(),H.data(),W.rows,W.cols); } // }}} template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dmat_t<val_type> &D, const dmat_t<val_type> &H) { // {{{ return trace_dmat_T_diag_dmat(W, dvec_t<val_type>(D.get_view()), H); } // }}} //------------------ Implementation of zip_it ----------------------- // helpler functions and classes for zip_it template<class T1, class T2> struct zip_body { // {{{ T1 x; T2 y; zip_body(const zip_ref<T1,T2>& other): x(*other.x), y(*other.y){} bool operator<(const zip_body &other) const {return x < other.x;} bool operator>(zip_body &other) const {return x > other.x;} bool operator==(zip_body &other) const {return x == other.x;} bool operator!=(zip_body &other) const {return x != other.x;} }; // }}} template<class T1, class T2> 
struct zip_ref { // {{{ T1 *x; T2 *y; zip_ref(T1 &x, T2 &y): x(&x), y(&y){} zip_ref(zip_body<T1,T2>& other): x(&other.x), y(&other.y){} bool operator<(zip_ref other) const {return *x < *other.x;} bool operator>(zip_ref other) const {return *x > *other.x;} bool operator==(zip_ref other) const {return *x == *other.x;} bool operator!=(zip_ref other) const {return *x != *other.x;} zip_ref& operator=(zip_ref& other) { *x = *other.x; *y = *other.y; return *(this); } zip_ref& operator=(zip_body<T1,T2> other) { *x = other.x; *y = other.y; return *(this); } }; // }}} template<class T1, class T2> void swap(zip_ref<T1,T2> a, zip_ref<T1,T2> b) { // {{{ std::swap(*(a.x),*(b.x)); std::swap(*(a.y),*(b.y)); } // }}} template<class IterT1, class IterT2> struct zip_it { // {{{ typedef std::random_access_iterator_tag iterator_category; typedef typename std::iterator_traits<IterT1>::value_type T1; typedef typename std::iterator_traits<IterT2>::value_type T2; typedef zip_body<T1,T2> value_type; typedef zip_ref<T1,T2> reference; typedef zip_body<T1,T2>* pointer; typedef ptrdiff_t difference_type; IterT1 x; IterT2 y; zip_it(IterT1 x, IterT2 y): x(x), y(y){} reference operator*() {return reference(*x, *y);} reference operator[](const difference_type n) const {return reference(x[n],y[n]);} zip_it& operator++() {++x; ++y; return *this;} // prefix ++ zip_it& operator--() {--x; --y; return *this;} // prefix -- zip_it operator++(int) {return zip_it(x++,y++);} // sufix ++ zip_it operator--(int) {return zip_it(x--,y--);} // sufix -- zip_it operator+(const difference_type n) {return zip_it(x+n,y+n);} zip_it operator-(const difference_type n) {return zip_it(x-n,y-n);} zip_it& operator+=(const difference_type n) {x+=n; y+=n; return *this;} zip_it& operator-=(const difference_type n) {x-=n; y-=n; return *this;} bool operator<(const zip_it& other) {return x<other.x;} bool operator>(const zip_it& other) {return x>other.x;} bool operator==(const zip_it& other) {return x==other.x;} bool operator!=(const 
zip_it& other) {return x!=other.x;} difference_type operator-(const zip_it& other) {return x-other.x;} }; // }}} template<class IterT1, class IterT2> zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y) { // {{{ return zip_it<IterT1,IterT2>(x,y); } // }}} //}; // end of namespace rofu #undef gmat_t #undef eye_t #undef smat_t #undef dmat_t #undef dvec_t #endif // SPARSE_MATRIX_H
// ==== NA.c — single-precision NA/NaN handling for the float package ====
// Elements taken from the coop package (src/R_naomit.c)
// Copyright (c) 2016-2017 Drew Schmidt
//
// NA support for single-precision matrices exposed to R: defines a float
// analogue of R's NA_REAL plus is.na / anyNA / na.omit implementations.
#include "safeomp.h"
#include <stdint.h>
#include <string.h>
#include "endianness.h"
#include "NA.h"
#include "Rfloat.h"
#include "unroll.h"

// R uses 0x7ff00000000007a2 for NA_REAL, and 0x7f8007a2 is a reasonable float analogue
float NA_FLOAT; // extern'd in NA.h
float R_NaNf;

// Build the NA_FLOAT bit pattern (0x7f8007a2: a quiet-NaN with payload
// 0x7a2 == 1954), byte-swapped under SPM_BOBE for big-endian hosts.
// memcpy (not a pointer cast) avoids strict-aliasing UB.
static inline float set_na_float()
{
  float ret;
#if SPM_BOBE
  int32_t x = 0xa207807f;
#else
  int32_t x = 0x7f8007a2;
#endif
  memcpy((void*) &ret, (void*) &x, sizeof(ret));
  return ret;
}

/*
static inline float set_nan_float()
{
  float ret;
#if SPM_BOBE
  uint32_t NaN = 0x0100807f;
#else
  uint32_t NaN = 0x7f800001;
#endif
  memcpy((void*) &ret, (void*) &NaN, sizeof(ret));
  return ret;
}
*/

// True iff x is the float NA (a NaN whose payload half-word is 1954).
// The half-word checked depends on endianness; `mrb` is presumably a
// float/uint16 punning union from Rfloat.h — TODO confirm.
int ISNAf(const float x)
{
  if (!isnan(x))
    return 0;
  mrb y;
  y.x = x;
#if SPM_BOBE
  return y.y[1] == 1954;
#else
  return y.y[0] == 1954;
#endif
}

// True iff x is a NaN that is NOT the NA marker (mirrors R's NA/NaN split).
int ISNANf(const float x)
{
  return isnan(x) && !ISNAf(x);
}

// have to call on package load to set the global NA_FLOAT
SEXP R_init_NAf()
{
  SEXP ret;
  PROTECT(ret = newvec(1));
  NA_FLOAT = set_na_float();
  FLOAT(ret)[0] = NA_FLOAT;
  UNPROTECT(1);
  return ret;
}

// NOTE(review): this assigns set_na_float(), not the (commented-out)
// set_nan_float(), so R_NaNf gets the same bit pattern as NA_FLOAT —
// confirm this is intentional and not a copy-paste slip.
SEXP R_init_NaNf()
{
  SEXP ret;
  PROTECT(ret = newvec(1));
  R_NaNf = set_na_float();
  FLOAT(ret)[0] = R_NaNf;
  UNPROTECT(1);
  return ret;
}

// is.na for a float matrix/vector: logical result of the same shape,
// TRUE where the element is NaN or the float NA.
SEXP R_isna_spm(SEXP x)
{
  SEXP ret;
  const float_len_t m = NROWS(x);
  const float_len_t n = NCOLS(x);
  float *xf = FLOAT(x);
  if (ISAVEC(x))
    PROTECT(ret = allocVector(LGLSXP, ((size_t)m*n)));
  else
    PROTECT(ret = allocMatrix(LGLSXP, m, n));
  for (float_len_t j=0; j<n; j++)
  {
    for (float_len_t i=0; i<m; i++)
    {
      const float tmp = xf[i + m*j];
      LOGICAL(ret)[i + m*j] = isnan(tmp) || ISNAf(tmp);
    }
  }
  UNPROTECT(1);
  return ret;
}

// ----------------------------------------------------------------------------
// anyNA
// ----------------------------------------------------------------------------

// anyNA: scalar logical; delegates the scan to anyNA() (from unroll.h,
// presumably — TODO confirm).
SEXP R_anyNA_spm(SEXP x)
{
  SEXP ret;
  const R_xlen_t len = (R_xlen_t) NROWS(x)*NCOLS(x);
  PROTECT(ret = allocVector(LGLSXP, 1));
  LOGICAL(ret)[0] = anyNA(len, DATA(x));
  UNPROTECT(1);
  return ret;
}

// ----------------------------------------------------------------------------
// na.omit
// ----------------------------------------------------------------------------

#include <stdlib.h>

// faster to index each element and operate accordingly, but
// this is too memory expensive for most applications
// note: R does this anyway because, well, R...
//
// Small-matrix path: marks every NA cell (len-sized scratch), folds the marks
// into column 0 as per-row "drop" flags, then copies surviving rows.
// Returns R_NilValue when no row is dropped (caller then returns x as-is).
static SEXP R_naomit_spm_small(const float_len_t m, const float_len_t n, const float *const x)
{
  SEXP ret;
  const size_t len = m*n;
  float_len_t m_fin = m;
  int *na_vec_ind = (int*) calloc(len, sizeof(*na_vec_ind));
  CHECKMALLOC(na_vec_ind);

  // get indices of NA's
  for (size_t i=0; i<len; i++)
  {
    if (ISNAf(x[i]) || isnan(x[i]))
      na_vec_ind[i] = 1;
  }

  // adjust col index; turn first column of the NA indices
  // to track which rows should go
  for (float_len_t j=1; j<n; j++)
  {
    const float_len_t mj = m*j;
    for (float_len_t i=0; i<m; i++)
    {
      if (na_vec_ind[i + mj])
        na_vec_ind[i] = 1;
    }
  }

  // get number of rows of output
  for (float_len_t i=0; i<m; i++)
    m_fin -= na_vec_ind[i];
  if (m_fin == m)
  {
    free(na_vec_ind);
    return R_NilValue;
  }

  // build reduced matrix
  PROTECT(ret = newmat(m_fin, n));
  float *ptr = DATA(ret);
  for (float_len_t j=0; j<n; j++)
  {
    const float_len_t mj = m*j;
    float_len_t row = 0;
    for (float_len_t i=0; i<m; i++)
    {
      // i < m here, so i%m == i — the modulo is redundant but harmless
      if (!na_vec_ind[i%m])
      {
        ptr[row + m_fin*j] = x[i + mj];
        row++;
      }
    }
  }

  free(na_vec_ind);
  UNPROTECT(1);
  return ret;
}

// Large-matrix path: same semantics as _small but with only an m-sized row
// flag array and OpenMP-parallel column scans.
static SEXP R_naomit_spm_big(const float_len_t m, const float_len_t n, const float *const x)
{
  SEXP ret;
  float_len_t m_fin = m;
  int *rows = (int*) calloc(m, sizeof(*rows));
  CHECKMALLOC(rows);

  // get indices of NA's
  // NOTE(review): several threads may store rows[i] = 1 concurrently; every
  // write stores the same value, but this is formally a data race — consider
  // an atomic write if a sanitizer flags it.
  #pragma omp parallel for default(shared) shared(rows)
  for (float_len_t j=0; j<n; j++)
  {
    const float_len_t mj = m*j;
    for (float_len_t i=0; i<m; i++)
    {
      if (ISNAf(x[i + m*j]) || isnan(x[i + mj]))
        rows[i] = 1;
    }
  }

  // get number of rows of output
  for (float_len_t i=0; i<m; i++)
    m_fin -= rows[i];
  if (m_fin == m)
  {
    free(rows);
    return R_NilValue;
  }

  PROTECT(ret = newmat(m_fin, n));
  float *ptr = DATA(ret);

  // build reduced matrix (each thread owns whole columns, so no race here)
  #pragma omp parallel for default(shared) shared(rows, ptr, m_fin)
  for (float_len_t j=0; j<n; j++)
  {
    const float_len_t mj = m*j;
    float_len_t row = 0;
    for (float_len_t i=0; i<m; i++)
    {
      if (!rows[i])
      {
        ptr[row + m_fin*j] = x[i + mj];
        row++;
      }
    }
  }

  free(rows);
  UNPROTECT(1);
  return ret;
}

// Vector path: count the NA/NaN entries, then copy the survivors.
static SEXP R_naomit_spm_vec(size_t n, const float *const x)
{
  SEXP ret;
  size_t numna = 0;
  for (size_t i=0; i<n; i++)
  {
    if (ISNAf(x[i]) || isnan(x[i]))
      numna++;
  }

  PROTECT(ret = newvec(n-numna));
  float *retf = FLOAT(ret);
  size_t retpos = 0;
  for (size_t i=0; i<n; i++)
  {
    if (!ISNAf(x[i]) && !isnan(x[i]))
      retf[retpos++] = x[i];
  }

  UNPROTECT(1);
  return ret;
}

// na.omit entry point: dispatches on vector-ness and problem size
// (OMP_MIN_SIZE threshold); returns the input unchanged when nothing
// was dropped (the matrix helpers signal that with R_NilValue).
SEXP R_naomit_spm(SEXP x)
{
  SEXP ret;
  const float_len_t m = NROWS(x);
  const float_len_t n = NCOLS(x);

  if (ISAVEC(x))
    return R_naomit_spm_vec(m, DATA(x));
  else if (m*n < OMP_MIN_SIZE)
    ret = R_naomit_spm_small(m, n, DATA(x));
  else
    ret = R_naomit_spm_big(m, n, DATA(x));

  if (ret == R_NilValue)
    return x;
  else
    return ret;
}
reduction_min.c
// PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output

#include <stdio.h>
#include <omp.h>

// Regression test for an OpenMP `min` reduction.
// Each thread writes its own id into its private copy of `result`; the
// reduction combines the private copies (and the original value, 100) with
// `min`, so the final value is 0 whenever at least one thread runs.
int main() {
    int result = 100;

#pragma omp parallel reduction(min:result)
    {
        result = omp_get_thread_num();
    }

    // This line is diffed against %s.reference_output — do not change it.
    printf("Result: %d\n", result);

    return 0; // explicit success status (was previously implicit via C99 main)
}
preproc.c
#include "incs.h"
#include "check.h"
#include "preproc_j.h"
#include "preproc.h"

extern config_t g_C;

// Pre-process the m feature rows of X (each of length n) into the encoded
// representation consumed by downstream training code.
//
// Inputs:
//   X        — [m][n] raw feature matrix, one row per feature
//   g        — [n] per-sample labels; g[i] == 0 => tail, g[i] == 1 => head
// Outputs (ownership transfers to the caller on success):
//   *ptr_Y    — [m][n] encoded features, one row per feature (from preproc_j)
//   *ptr_to   — [m][n] per-feature ordering/index arrays (from preproc_j)
//   *ptr_tmpY — [m] scratch rows of length n for later passes
//   *ptr_nT   — number of tails (samples with g[i] == 0)
//   *ptr_nH   — number of heads (samples with g[i] == 1)
// Returns 0 on success, negative on failure; on failure all partially
// allocated buffers are released and no output pointer is written.
int preproc(
    float ** restrict X, /* [m][n] */
    uint32_t m,
    uint32_t n,
    uint8_t * restrict g,
    uint32_t *ptr_nT, // count of samples encoded as 0 (tails)
    uint32_t *ptr_nH, // count of samples encoded as 1 (heads)
    uint64_t ***ptr_Y,
    uint32_t ***ptr_to,
    uint64_t ***ptr_tmpY
    )
{
  int status = 0;
  uint64_t **Y    = NULL; // [m][n]
  uint32_t **to   = NULL; // [m][n]
  uint64_t **tmpY = NULL; // [m] rows of [n]

  // calloc (not malloc) so the error path below never frees an
  // uninitialized row pointer when we fail before filling every slot.
  Y = calloc(m, sizeof(uint64_t *));
  return_if_malloc_failed(Y);
  to = calloc(m, sizeof(uint32_t *));
  return_if_malloc_failed(to);

  // May want to do this sequentially to reduce the amount of memory
  // allocated in the process of creating Y.  TODO: Think about this.
  // #pragma omp parallel for
  // pre-process features one at a time
  for ( uint32_t j = 0; j < m; j++ ) {
    status = preproc_j(X[j], n, g, &(Y[j]), &(to[j]));
    cBYE(status);
  }
  //-----------------------------------------
  // allocate a buffer (same size as Y) for intermediate storage
  tmpY = calloc(m, sizeof(uint64_t *));
  return_if_malloc_failed(tmpY);
  for ( uint32_t j = 0; j < m; j++ ) {
    tmpY[j] = malloc(n * sizeof(uint64_t));
    return_if_malloc_failed(tmpY[j]);
  }

  // compute the number of heads and tails in the training set
  uint32_t nH = 0;
#pragma omp simd reduction(+:nH)
  for ( uint32_t i = 0; i < n; i++ ) {
    nH += g[i];
  }
  uint32_t nT = n - nH;

  if ( g_C.is_debug ) {
    // Recount independently and cross-check against the SIMD tally.
    // (Bug fix: this loop previously incremented nT/nH themselves, which
    // corrupted the real counts and made the checks below always fail.)
    uint32_t chk_nT = 0;
    uint32_t chk_nH = 0;
    for ( uint32_t i = 0; i < n; i++ ) {
      if ( g[i] == 0 ) {
        chk_nT++;
      }
      else {
        chk_nH++;
      }
    }
    if ( chk_nT != nT ) { go_BYE(-1); }
    if ( chk_nH != nH ) { go_BYE(-1); }
  }
  //---------------------------------
  if ( g_C.is_debug ) {
    status = check(to, g, 0, n, nT, nH, n, m, Y);
    cBYE(status);
  }
  //---------------------------------
  *ptr_Y    = Y;
  *ptr_tmpY = tmpY;
  *ptr_to   = to;
  *ptr_nT   = nT;
  *ptr_nH   = nH;
BYE:
  if ( status < 0 ) {
    // Unset row pointers are NULL thanks to calloc, so these loops are safe
    // no matter how far the allocation above got.
    if ( Y != NULL ) {
      for ( uint32_t j = 0; j < m; j++ ) {
        free_if_non_null(Y[j]);
      }
    }
    if ( tmpY != NULL ) {
      for ( uint32_t j = 0; j < m; j++ ) {
        free_if_non_null(tmpY[j]);
      }
    }
    if ( to != NULL ) {
      for ( uint32_t j = 0; j < m; j++ ) {
        free_if_non_null(to[j]);
      }
    }
    free_if_non_null(Y);
    free_if_non_null(to);
    free_if_non_null(tmpY);
  }
  return status;
}
4963_so4_new.c
// Auto-generated (Devito-style) 3-D acoustic wave propagator with a damping
// term: `Forward` runs the time loop (blocked stencil update + point-source
// injection), `bf0` is the cache-blocked spatial kernel.
// NOTE(review): generated numerical code — statement order and constants are
// significant; comments only, no code changes.
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"

// Grid-function carrier: raw data plus per-dimension size/padding metadata.
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

// Wall-clock accumulators for the two profiled sections.
struct profiler
{
  double section0;
  double section1;
} ;

void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);

// Time loop: for each step, run the blocked stencil (section0) over the full
// block-aligned region plus the x/y remainder strips, then inject the point
// source with trilinear weights (section1).
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, const int x0_blk0_size, const int y0_blk0_size, const int nthreads, const int nthreads_nonaffine, struct profiler * timers)
{
  // Reinterpret the flat buffers as VLA-typed multi-dimensional views.
  float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
  float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  // t0/t1/t2 rotate through the 3 time buffers of u (current/previous/next).
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 2)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 2)%(3), t2 = (time + 1)%(3))
  {
    struct timeval start_section0, end_section0;
    gettimeofday(&start_section0, NULL);
    /* Begin section0 */
    // Four calls: full blocked interior, y-remainder, x-remainder, corner.
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,x0_blk0_size,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,y0_blk0_size,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
    bf0(damp_vec,dt,u_vec,vp_vec,t0,t1,t2,(x_M - x_m + 1)%(x0_blk0_size),x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,(y_M - y_m + 1)%(y0_blk0_size),y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1 */
    // Source injection: each source point is spread over its 8 surrounding
    // grid nodes with trilinear weights; atomics guard nodes shared by
    // neighbouring sources.
#pragma omp parallel num_threads(nthreads_nonaffine)
    {
      int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_src_M - p_src_m + 1)/nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic,chunk_size)
      for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
      {
        float posx = -o_x + src_coords[p_src][0];
        float posy = -o_y + src_coords[p_src][1];
        float posz = -o_z + src_coords[p_src][2];
        // 6.66667e-2 == 1/15 (grid spacing 15); ii_src_* are the cell corners.
        int ii_src_0 = (int)(floor(6.66667e-2*posx));
        int ii_src_1 = (int)(floor(6.66667e-2*posy));
        int ii_src_2 = (int)(floor(6.66667e-2*posz));
        int ii_src_3 = (int)(floor(6.66667e-2*posz)) + 1;
        int ii_src_4 = (int)(floor(6.66667e-2*posy)) + 1;
        int ii_src_5 = (int)(floor(6.66667e-2*posx)) + 1;
        float px = (float)(posx - 1.5e+1F*(int)(floor(6.66667e-2F*posx)));
        float py = (float)(posy - 1.5e+1F*(int)(floor(6.66667e-2F*posy)));
        float pz = (float)(posz - 1.5e+1F*(int)(floor(6.66667e-2F*posz)));
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
        {
          float r0 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4])*(-2.96296e-4F*px*py*pz + 4.44445e-3F*px*py + 4.44445e-3F*px*pz - 6.66667e-2F*px + 4.44445e-3F*py*pz - 6.66667e-2F*py - 6.66667e-2F*pz + 1)*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] += r0;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
        {
          float r1 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4])*(2.96296e-4F*px*py*pz - 4.44445e-3F*px*pz - 4.44445e-3F*py*pz + 6.66667e-2F*pz)*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] += r1;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r2 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4])*(2.96296e-4F*px*py*pz - 4.44445e-3F*px*py - 4.44445e-3F*py*pz + 6.66667e-2F*py)*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] += r2;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r3 = (dt*dt)*(vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4])*(-2.96296e-4F*px*py*pz + 4.44445e-3F*py*pz)*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] += r3;
        }
        if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r4 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4])*(2.96296e-4F*px*py*pz - 4.44445e-3F*px*py - 4.44445e-3F*px*pz + 6.66667e-2F*px)*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] += r4;
        }
        if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r5 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4])*(-2.96296e-4F*px*py*pz + 4.44445e-3F*px*pz)*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] += r5;
        }
        if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r6 = (dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4])*(-2.96296e-4F*px*py*pz + 4.44445e-3F*px*py)*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] += r6;
        }
        if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r7 = 2.96296e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4]*vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4])*src[time][p_src];
#pragma omp atomic update
          u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] += r7;
        }
      }
    }
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
  }
  return 0;
}

// Cache-blocked 4th-order-in-space stencil update over [x_m..x_M]x[y_m..y_M]
// (x0_blk0_size x y0_blk0_size tiles), vectorized over z. Writes time level
// t2 from levels t0/t1. The `+ 4` index shifts skip the halo padding.
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  // Remainder strips may legitimately have a zero-sized block; nothing to do.
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
#pragma omp parallel num_threads(nthreads)
  {
#pragma omp for collapse(2) schedule(dynamic,1)
    for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1)
        {
          for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1)
          {
#pragma omp simd aligned(damp,u,vp:32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r15 = 1.0/dt;
              float r14 = 1.0/(dt*dt);
              float r13 = 1.0/(vp[x + 4][y + 4][z + 4]*vp[x + 4][y + 4][z + 4]);
              u[t2][x + 4][y + 4][z + 4] = (r13*(-r14*(-2.0F*u[t0][x + 4][y + 4][z + 4] + u[t1][x + 4][y + 4][z + 4])) + r15*(damp[x + 1][y + 1][z + 1]*u[t0][x + 4][y + 4][z + 4]) - 3.70370379e-4F*(u[t0][x + 2][y + 4][z + 4] + u[t0][x + 4][y + 2][z + 4] + u[t0][x + 4][y + 4][z + 2] + u[t0][x + 4][y + 4][z + 6] + u[t0][x + 4][y + 6][z + 4] + u[t0][x + 6][y + 4][z + 4]) + 5.92592607e-3F*(u[t0][x + 3][y + 4][z + 4] + u[t0][x + 4][y + 3][z + 4] + u[t0][x + 4][y + 4][z + 3] + u[t0][x + 4][y + 4][z + 5] + u[t0][x + 4][y + 5][z + 4] + u[t0][x + 5][y + 4][z + 4]) - 3.33333341e-2F*u[t0][x + 4][y + 4][z + 4])/(r13*r14 + r15*damp[x + 1][y + 1][z + 1]);
            }
          }
        }
      }
    }
  }
}
target_enter_data.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu #include <stdio.h> int main() { int i; // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]] fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i); // CHECK-NOT: Libomptarget #pragma omp target enter data map(alloc: i) #pragma omp target enter data map(present, alloc: i) #pragma omp target exit data map(delete: i) // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target enter data map(present, alloc: i) // CHECK-NOT: i is present fprintf(stderr, "i is present\n"); return 0; }
GB_unop__identity_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): auto-generated file — comments added only, code unchanged.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_int8)
// op(A') function:  GB (_unop_tran__identity_uint64_int8)

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int8_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ;   \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_int8)
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SimulationTools.h
//////////////////////////////////////////////////////////////////////////////////
// COMPANY:		Ruhr University Bochum, Embedded Security
// AUTHOR:		Amir Moradi (for the paper: https://eprint.iacr.org/2019/1312 )
//////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2019, Amir Moradi
// All rights reserved.
//
// BSD-3-Clause License
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//     * Neither the name of the copyright holder, their organization nor the
//       names of its contributors may be used to endorse or promote products
//       derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//***************************************************************************************
// Cycle-accurate simulation of a gate-level netlist with fault injection.
// For each clock cycle: clock the registers (with toggle/stuck-at-1/stuck-at-0
// faults applied), drive the initial stimulus, then evaluate combinational
// cells level by level (depth 1..MaxDepth), again applying per-cell faults.
// The simulation stops once the end-condition signals have matched their
// expected values for EndSim_NumberOfWaitCycles cycles, or after
// Max_No_ClockCycles at the latest.
// Returns the index of the clock cycle at which the simulation stopped.
// SignalValues/RegValues are caller-provided state arrays and carry the final
// circuit state back to the caller.
int RunSimulation(SignalStruct** Signals, int ClockSignal, int Max_No_ClockCycles, int InitialSim_NumberOfClockCycles, int InitialSim_NumberOfInputs, int** InitialSim_Inputs, char** InitialSim_Values, CellStruct** Cells, int* Regs, int NumberOfRegs, short MaxDepth, int** CellsInDepth, int* NumberOfCellsInDepth, CellTypeStruct** CellTypes, int* EndSimCondition_Signals, char* EndSimCondition_Values, int EndSimCondition_NumberOfSignals, int EndSim_NumberOfWaitCycles, int* SignalValues, int* RegValues, char*** Faults)
{
	int i;
	int InputIndex;
	int OutputIndex;
	int SignalIndex;
	int RegIndex;
	int DepthIndex;
	int CellIndex;
	int ClockCycle;
	int v;
	int Value;
	int NumberOfWaitedClockCycles = -1;   // -1 until the end condition first holds

	for (ClockCycle = 0;ClockCycle < Max_No_ClockCycles;ClockCycle++)
	{
		SignalValues[ClockSignal] = 1;

		// ----------- evaluate the registers
		// v packs the register cell's input signals and its current stored
		// outputs into one lookup index for the cell's truth table.
		for (RegIndex = 0;RegIndex < NumberOfRegs;RegIndex++)
		{
			v = 0;
			for (InputIndex = 0;InputIndex < Cells[Regs[RegIndex]]->NumberOfInputs;InputIndex++)
				v |= SignalValues[Cells[Regs[RegIndex]]->Inputs[InputIndex]] << InputIndex;

			for (OutputIndex = 0;OutputIndex < Cells[Regs[RegIndex]]->NumberOfOutputs;OutputIndex++)
				v |= RegValues[Cells[Regs[RegIndex]]->RegValueIndexes[OutputIndex]] << (Cells[Regs[RegIndex]]->NumberOfInputs + OutputIndex);

			for (OutputIndex = 0;OutputIndex < Cells[Regs[RegIndex]]->NumberOfOutputs;OutputIndex++)
			{
				// apply the three fault models to the looked-up output bit;
				// values are 0/1, so ^ toggles, | forces 1, &! forces 0
				Value = CellTypes[Cells[Regs[RegIndex]]->Type]->Tables[OutputIndex][v];
				Value ^= Faults[FaultInjection_toggle][ClockCycle][Regs[RegIndex]];
				Value |= Faults[FaultInjection_stuck_at_1][ClockCycle][Regs[RegIndex]];
				Value &= !Faults[FaultInjection_stuck_at_0][ClockCycle][Regs[RegIndex]];

				RegValues[Cells[Regs[RegIndex]]->RegValueIndexes[OutputIndex]] = Value;
			}
		}

		// ----------- applying the initial inputs
		if (ClockCycle < InitialSim_NumberOfClockCycles)
			for (InputIndex = 0;InputIndex < InitialSim_NumberOfInputs;InputIndex++)
				SignalValues[InitialSim_Inputs[ClockCycle][InputIndex]] = InitialSim_Values[ClockCycle][InputIndex];

		// ----------- applying the register outputs to the output signals
		for (RegIndex = 0;RegIndex < NumberOfRegs;RegIndex++)
			for (OutputIndex = 0;OutputIndex < Cells[Regs[RegIndex]]->NumberOfOutputs;OutputIndex++)
				if (Cells[Regs[RegIndex]]->Outputs[OutputIndex] != -1)
					SignalValues[Cells[Regs[RegIndex]]->Outputs[OutputIndex]] = RegValues[Cells[Regs[RegIndex]]->RegValueIndexes[OutputIndex]];

		// ----------- evaluate the circuits :D
		// combinational cells are sorted by logic depth, so one forward pass
		// per depth level suffices
		for (DepthIndex = 1;DepthIndex <= MaxDepth;DepthIndex++)
		{
			for (i = 0;i < NumberOfCellsInDepth[DepthIndex];i++)
			{
				CellIndex = CellsInDepth[DepthIndex][i];

				v = 0;
				for (InputIndex = 0;InputIndex < Cells[CellIndex]->NumberOfInputs;InputIndex++)
					v |= SignalValues[Cells[CellIndex]->Inputs[InputIndex]] << InputIndex;

				for (OutputIndex = 0;OutputIndex < Cells[CellIndex]->NumberOfOutputs;OutputIndex++)
					if (Cells[CellIndex]->Outputs[OutputIndex] != -1)
					{
						Value = CellTypes[Cells[CellIndex]->Type]->Tables[OutputIndex][v];
						Value ^= Faults[FaultInjection_toggle][ClockCycle][CellIndex];
						Value |= Faults[FaultInjection_stuck_at_1][ClockCycle][CellIndex];
						Value &= !Faults[FaultInjection_stuck_at_0][ClockCycle][CellIndex];

						SignalValues[Cells[CellIndex]->Outputs[OutputIndex]] = Value;
					}
			}
		}

		SignalValues[ClockSignal] = 0;
		// re-evaluate (we don't need it in this design since it works only at
		// positive edge of the clock and does not have a latch)

		// ----------- check the conditions to terminate the simulation
		if (ClockCycle > InitialSim_NumberOfClockCycles)
		{
			if (NumberOfWaitedClockCycles == -1)
			{
				// arm the wait counter once every end-condition signal matches
				for (SignalIndex = 0;SignalIndex < EndSimCondition_NumberOfSignals;SignalIndex++)
					if (SignalValues[EndSimCondition_Signals[SignalIndex]] != EndSimCondition_Values[SignalIndex])
						break;

				if (SignalIndex >= EndSimCondition_NumberOfSignals)
					NumberOfWaitedClockCycles = 0;
			}
			else
				NumberOfWaitedClockCycles++;

			if (NumberOfWaitedClockCycles >= EndSim_NumberOfWaitCycles)
				break;
		}
	}

	return (ClockCycle);
}

//***************************************************************************************
// (continues beyond this excerpt)
int RunFaultInjection(int Max_no_of_Threads, SignalStruct** Signals, int NumberOfSignals, int ClockSignal, int NumberOfRegValues, int Max_No_ClockCycles, CellStruct** Cells, int NumberOfCells, char FaultInjectionType, char FaultInjectionMethod, int NumberOfSimulationsInFile, int NumberOfTargetClockCycles, int* TargetClockCycles, int MaxNumberOfFaultsPerRun, int MinNumberOfFaultsPerRun, int MaxNumberOfFaultsPerCycle, int MinNumberOfFaultsPerCycle, char* EvaluationResultFileName, int InitialSim_NumberOfClockCycles, int InitialSim_NumberOfInputs, int** InitialSim_Inputs, char** InitialSim_Values, int* Regs, int NumberOfRegs, short MaxDepth, int** CellsInDepth, int* NumberOfCellsInDepth, CellTypeStruct** CellTypes, int* EndSimCondition_Signals, char* EndSimCondition_Values, int EndSimCondition_NumberOfSignals, int EndSim_NumberOfWaitCycles, char** EndSim_OutputNames,int* EndSim_Outputs_IndexL, int* EndSim_Outputs_IndexH, char* EndSim_Outputs_Base, int EndSim_NumberOfOutputBlocks, int* FaultFreeSignalValues, int NumberOfFaultFreeOutputs, int* FaultFreeOutputs, char Print_Nondetected, char Print_Detected, char Print_Ineffective, char Print_RunTimeOver, SimulationResultStruct* &SimulationResults, int &NumberOfSimulations)
{
	int CellIndex;
	int *FaultAllowedCells;
	int NumberOfFaultAllowedCells;
	int ClockCycle;
	int **SignalValues = NULL;
	int **RegValues = NULL;
	char ****Faults = NULL;
	int ThreadIndex;
	int SimulationIndex;
	int SimulationCounter;
	int RangeNumberOfFaultsPerCycle;
	int RangeNumberOfFaultsPerRun;
	int *DetectedCounter;
	int *NondetectedCounter;
	int *IneffectiveCounter;
	int *RunTimeOverCounter;
	FILE *EvaluationResultFile;
	int TotalDetected;
	int TotalNondetected;
	int TotalIneffective;
	int TotalRunTimeOver;
	int LocalIndex;
	int i, j, k;
	char ValidSimulation;
	int NumberOfInjectedFaults;
	int NumberOfFaultsInCycle;
	int SelectedNumberOfInjectedFaults;
char *Seeded; int MaxTargetClockCycle; int MinTargetClockCycle; char *TargetClockCycleValid; int ClockCycleIndex; clock_t begin; NumberOfFaultAllowedCells = 0; for (CellIndex = 0;CellIndex < NumberOfCells;CellIndex++) if (Cells[CellIndex]->FaultAllowed) NumberOfFaultAllowedCells++; FaultAllowedCells = (int*)malloc(NumberOfFaultAllowedCells * sizeof(int)); NumberOfFaultAllowedCells = 0; for (CellIndex = 0;CellIndex < NumberOfCells;CellIndex++) if (Cells[CellIndex]->FaultAllowed) FaultAllowedCells[NumberOfFaultAllowedCells++] = CellIndex; SignalValues = (int **)malloc(Max_no_of_Threads * sizeof(int *)); RegValues = (int **)malloc(Max_no_of_Threads * sizeof(int *)); Faults = (char ****)malloc(Max_no_of_Threads * sizeof(char ***)); DetectedCounter = (int *)calloc(Max_no_of_Threads, sizeof(int)); NondetectedCounter = (int *)calloc(Max_no_of_Threads, sizeof(int)); IneffectiveCounter = (int *)calloc(Max_no_of_Threads, sizeof(int)); RunTimeOverCounter = (int *)calloc(Max_no_of_Threads, sizeof(int)); Seeded = (char *)calloc(Max_no_of_Threads, sizeof(char)); for (ThreadIndex = 0;ThreadIndex < Max_no_of_Threads;ThreadIndex++) { SignalValues[ThreadIndex] = (int *)calloc(NumberOfSignals, sizeof(int)); RegValues[ThreadIndex] = (int *)calloc(NumberOfRegValues, sizeof(int)); Faults[ThreadIndex] = (char ***)malloc(NumberOfFaultInjectionTypes * sizeof(char **)); SignalValues[ThreadIndex][1] = 1; // constant 1'b1 for (i = 0;i < NumberOfFaultInjectionTypes;i++) { Faults[ThreadIndex][i] = (char **)malloc(Max_No_ClockCycles * sizeof(char *)); for (ClockCycle = 0;ClockCycle < Max_No_ClockCycles;ClockCycle++) Faults[ThreadIndex][i][ClockCycle] = (char *)calloc(NumberOfCells, sizeof(char)); } } TargetClockCycleValid = (char*)calloc(Max_No_ClockCycles, sizeof(char)); MaxTargetClockCycle = TargetClockCycles[0]; MinTargetClockCycle = TargetClockCycles[0]; for (ClockCycleIndex = 0;ClockCycleIndex < NumberOfTargetClockCycles;ClockCycleIndex++) { 
TargetClockCycleValid[TargetClockCycles[ClockCycleIndex]] = 1; if (MaxTargetClockCycle < TargetClockCycles[ClockCycleIndex]) MaxTargetClockCycle = TargetClockCycles[ClockCycleIndex]; if (MinTargetClockCycle > TargetClockCycles[ClockCycleIndex]) MinTargetClockCycle = TargetClockCycles[ClockCycleIndex]; } if (FaultInjectionMethod == FaultInjection_Exhaustive) NumberOfSimulations = pow(NumberOfFaultAllowedCells, MinNumberOfFaultsPerCycle * NumberOfTargetClockCycles); else // FaultInjection_Random NumberOfSimulations = NumberOfSimulationsInFile; if ((NumberOfSimulations > 600000000L) | (NumberOfSimulations < 0)) { printf("Number of simulations %d is over the threshold", NumberOfSimulations); return 1; } else { if (FaultInjectionMethod == FaultInjection_Exhaustive) printf("Number of cells to be faulty: %d\n", NumberOfFaultAllowedCells); printf("Number of simulations: %d\n", NumberOfSimulations); } SimulationResults = (SimulationResultStruct *)malloc(NumberOfSimulations * sizeof(SimulationResultStruct)); omp_set_num_threads(Max_no_of_Threads); RangeNumberOfFaultsPerCycle = MaxNumberOfFaultsPerCycle - MinNumberOfFaultsPerCycle + 1; RangeNumberOfFaultsPerRun = MaxNumberOfFaultsPerRun - MinNumberOfFaultsPerRun + 1; SimulationCounter = 0; EvaluationResultFile = fopen(EvaluationResultFileName, "wt"); begin = clock(); #pragma omp parallel for schedule(guided) private(ThreadIndex, ClockCycleIndex, ClockCycle, i, j, k, LocalIndex, SelectedNumberOfInjectedFaults, NumberOfInjectedFaults, NumberOfFaultsInCycle, ValidSimulation, TotalDetected, TotalNondetected, TotalIneffective, TotalRunTimeOver) for (SimulationIndex = 0;SimulationIndex < NumberOfSimulations; SimulationIndex++) { ThreadIndex = omp_get_thread_num(); if (!Seeded[ThreadIndex]) { srand(int(time(NULL)) ^ ThreadIndex); Seeded[ThreadIndex] = 1; } SimulationResults[SimulationIndex].TaregtCells = (int *)malloc(MaxNumberOfFaultsPerRun * sizeof(int)); SimulationResults[SimulationIndex].TaregtClockCycles = (int 
*)malloc(MaxNumberOfFaultsPerRun * sizeof(int)); if (FaultInjectionMethod == FaultInjection_Exhaustive) { NumberOfInjectedFaults = NumberOfTargetClockCycles * MinNumberOfFaultsPerCycle; ValidSimulation = 1; LocalIndex = SimulationIndex; for (ClockCycleIndex = 0;(ClockCycleIndex < NumberOfTargetClockCycles) & ValidSimulation;ClockCycleIndex++) { k = ClockCycleIndex * MinNumberOfFaultsPerCycle; for (i = 0;(i < MinNumberOfFaultsPerCycle) & ValidSimulation;i++) { SimulationResults[SimulationIndex].TaregtCells[k + i] = FaultAllowedCells[LocalIndex % NumberOfFaultAllowedCells]; LocalIndex = (LocalIndex - (LocalIndex % NumberOfFaultAllowedCells)) / NumberOfFaultAllowedCells; SimulationResults[SimulationIndex].TaregtClockCycles[k + i] = TargetClockCycles[ClockCycleIndex]; for (j = 0;(j < i) & ValidSimulation;j++) if (SimulationResults[SimulationIndex].TaregtCells[k + i] >= SimulationResults[SimulationIndex].TaregtCells[k + j]) ValidSimulation = 0; } } } else //FaultInjection_Random { NumberOfInjectedFaults = 0; ValidSimulation = 1; SelectedNumberOfInjectedFaults = MinNumberOfFaultsPerRun + (rand() % RangeNumberOfFaultsPerRun); while (NumberOfInjectedFaults < SelectedNumberOfInjectedFaults) { do { ClockCycleIndex = rand() % NumberOfTargetClockCycles; ClockCycle = TargetClockCycles[ClockCycleIndex]; for (j = 0;j < NumberOfInjectedFaults;j++) if (SimulationResults[SimulationIndex].TaregtClockCycles[j] == ClockCycle) break; } while (j < NumberOfInjectedFaults); NumberOfFaultsInCycle = MinNumberOfFaultsPerCycle + (rand() % RangeNumberOfFaultsPerCycle); for (i = 0;(i < NumberOfFaultsInCycle) & (NumberOfInjectedFaults < MaxNumberOfFaultsPerRun);i++) { SimulationResults[SimulationIndex].TaregtCells[NumberOfInjectedFaults] = rand() % NumberOfCells; if (Cells[SimulationResults[SimulationIndex].TaregtCells[NumberOfInjectedFaults]]->FaultAllowed) { SimulationResults[SimulationIndex].TaregtClockCycles[NumberOfInjectedFaults] = ClockCycle; for (j = 0;j < i;j++) if 
(SimulationResults[SimulationIndex].TaregtCells[NumberOfInjectedFaults] == SimulationResults[SimulationIndex].TaregtCells[NumberOfInjectedFaults - j - 1]) break; if (j < i) i--; else NumberOfInjectedFaults++; } else i--; } } } SimulationResults[SimulationIndex].Valid = ValidSimulation; if (ValidSimulation) { SimulationResults[SimulationIndex].NumberOfInjectedFaults = NumberOfInjectedFaults; for (i = 0;i < NumberOfInjectedFaults;i++) Faults[ThreadIndex][FaultInjectionType][SimulationResults[SimulationIndex].TaregtClockCycles[i]][SimulationResults[SimulationIndex].TaregtCells[i]] = 1; ClockCycle = RunSimulation(Signals, ClockSignal, Max_No_ClockCycles, InitialSim_NumberOfClockCycles, InitialSim_NumberOfInputs, InitialSim_Inputs, InitialSim_Values, Cells, Regs, NumberOfRegs, MaxDepth, CellsInDepth, NumberOfCellsInDepth, CellTypes, EndSimCondition_Signals, EndSimCondition_Values, EndSimCondition_NumberOfSignals, EndSim_NumberOfWaitCycles, SignalValues[ThreadIndex], RegValues[ThreadIndex], Faults[ThreadIndex]); CheckResults(ClockCycle, Max_No_ClockCycles, EndSim_OutputNames, EndSim_Outputs_IndexL, EndSim_Outputs_IndexH, EndSim_Outputs_Base, EndSim_NumberOfOutputBlocks, Signals, NumberOfSignals, FaultFreeSignalValues, NumberOfFaultFreeOutputs, FaultFreeOutputs, SignalValues[ThreadIndex], SimulationResults[SimulationIndex], Print_Nondetected, Print_Detected, Print_Ineffective, IneffectiveCounter[ThreadIndex], NondetectedCounter[ThreadIndex], DetectedCounter[ThreadIndex], RunTimeOverCounter[ThreadIndex]); for (i = 0;i < NumberOfInjectedFaults;i++) Faults[ThreadIndex][FaultInjectionType][SimulationResults[SimulationIndex].TaregtClockCycles[i]][SimulationResults[SimulationIndex].TaregtCells[i]] = 0; #pragma omp atomic SimulationCounter++; if ((SimulationCounter & 0x7ff) == 0x7ff) { TotalDetected = 0; TotalNondetected = 0; TotalIneffective = 0; TotalRunTimeOver = 0; for (i = 0; i < Max_no_of_Threads; i++) { TotalDetected += DetectedCounter[i]; TotalNondetected += 
NondetectedCounter[i]; TotalIneffective += IneffectiveCounter[i]; TotalRunTimeOver += RunTimeOverCounter[i]; } int elapsed_secs = int(double(clock() - begin) / CLOCKS_PER_SEC); char Str1[200]; sprintf(Str1, "%04d:%02d Total: %d Ineffective: %d Detected: %d Non-detected: %d RunTimeOver: %d\n", elapsed_secs / 60, elapsed_secs % 60, SimulationCounter, TotalIneffective, TotalDetected, TotalNondetected, TotalRunTimeOver); printf(Str1); fprintf(EvaluationResultFile, Str1); } } else { free(SimulationResults[SimulationIndex].TaregtCells); free(SimulationResults[SimulationIndex].TaregtClockCycles); } } TotalDetected = 0; TotalNondetected = 0; TotalIneffective = 0; TotalRunTimeOver = 0; for (i = 0; i < Max_no_of_Threads; i++) { TotalDetected += DetectedCounter[i]; TotalNondetected += NondetectedCounter[i]; TotalIneffective += IneffectiveCounter[i]; TotalRunTimeOver += RunTimeOverCounter[i]; } int elapsed_secs = int(double(clock() - begin) / CLOCKS_PER_SEC); char Str1[200]; sprintf(Str1, "%04d:%02d Total: %d Ineffective: %d Detected: %d Non-detected: %d RunTimeOver: %d\n", elapsed_secs / 60, elapsed_secs % 60, SimulationCounter, TotalIneffective, TotalDetected, TotalNondetected, TotalRunTimeOver); printf(Str1); fprintf(EvaluationResultFile, Str1); fclose(EvaluationResultFile); return 0; } //***************************************************************************************
hwloc.c
/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://github.com/anlsys/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
 ******************************************************************************/

#include "aml.h"

#include "aml/area/hwloc.h"
#include "aml/higher/replicaset.h"
#include "aml/higher/replicaset/hwloc.h"

extern hwloc_topology_t aml_topology;

/**
 * Allocate an empty hwloc replicaset descriptor.
 *
 * Lays out, in one inner-malloc'd block: the aml_replicaset header, the
 * aml_replicaset_hwloc_data field, and a pointer array of
 * (n_numa + n_initiator) entries — the first n_numa slots are the replica
 * pointers (one potential replica per NUMA node), the remaining n_initiator
 * slots (data->ptr) map each initiator, by logical index, to its replica.
 *
 * Assumes AML_INNER_MALLOC_* produce a single contiguous allocation released
 * with free() — consistent with aml_replicaset_hwloc_destroy() below.
 *
 * @param out: filled with the new replicaset on success.
 * @param initiator_type: hwloc object type whose instances will read replicas.
 * @return AML_SUCCESS, or -AML_EDOM if no object of initiator_type exists,
 *         -AML_EINVAL if the first initiator has no usable cpuset,
 *         -AML_ENOMEM on allocation failure.
 */
int aml_replicaset_hwloc_alloc(struct aml_replicaset **out,
                               const hwloc_obj_type_t initiator_type)
{
	struct aml_replicaset *replicaset = NULL;
	struct aml_replicaset_hwloc_data *data = NULL;

	// Check initiator type.
	const unsigned int n_initiator =
	        hwloc_get_nbobjs_by_type(aml_topology, initiator_type);
	hwloc_obj_t initiator =
	        hwloc_get_obj_by_type(aml_topology, initiator_type, 0);

	if (n_initiator == 0)
		return -AML_EDOM;
	if (initiator == NULL || initiator->cpuset == NULL ||
	    hwloc_bitmap_weight(initiator->cpuset) <= 0)
		return -AML_EINVAL;

	const unsigned int n_numa =
	        hwloc_get_nbobjs_by_type(aml_topology, HWLOC_OBJ_NUMANODE);

	// Allocation: header + data struct + replica/initiator pointer arrays.
	replicaset = AML_INNER_MALLOC_ARRAY(n_numa + n_initiator, void *,
	                                    struct aml_replicaset,
	                                    struct aml_replicaset_hwloc_data);
	if (replicaset == NULL)
		return -AML_ENOMEM;

	// Set ops
	replicaset->ops = &aml_replicaset_hwloc_ops;

	// Set data
	replicaset->data = (struct aml_replicaset_data *)
	        AML_INNER_MALLOC_GET_FIELD(replicaset, 2,
	                                   struct aml_replicaset,
	                                   struct aml_replicaset_hwloc_data);
	data = (struct aml_replicaset_hwloc_data *)replicaset->data;

	// Set replica pointers array
	replicaset->replica = (void **)AML_INNER_MALLOC_GET_ARRAY(
	        replicaset, void *, struct aml_replicaset,
	        struct aml_replicaset_hwloc_data);
	for (unsigned i = 0; i < n_numa; i++)
		replicaset->replica[i] = NULL;

	// Set initiator pointers array (tail of the same pointer array).
	data->ptr = replicaset->replica + n_numa;

	// Set number of initiators
	data->num_ptr = n_initiator;

	// Set number of replicas to 0. Initialization will set
	// it to the correct value.
	replicaset->n = 0;

	*out = replicaset;
	return AML_SUCCESS;
}

/**
 * Create a replicaset with one replica per distinct "preferred" NUMA node.
 *
 * Walks every initiator of initiator_type (via next_cousin); for each, builds
 * its hwloc "preferred" area and either reuses the replica already mapped on
 * that area's top NUMA node or mmaps a new replica there. data->ptr is filled
 * so each initiator's logical index resolves to its replica.
 *
 * Note: `targets` is a C99 VLA of n_numa entries on the stack (n_numa is the
 * machine's NUMA-node count, so this stays small in practice).
 *
 * NOTE(review): on the aml_area_mmap() failure path we jump to
 * err_with_replicas without calling aml_area_hwloc_preferred_destroy(&area),
 * which looks like a leak of `area` — confirm against the area API.
 *
 * @param out: filled with the new replicaset on success.
 * @param size: byte size of each replica (replica contents are set later by
 *        aml_replicaset_hwloc_init()).
 * @param initiator_type: hwloc object type of the readers.
 * @param kind: hwloc distance kind used to rank NUMA nodes per initiator.
 * @return AML_SUCCESS or a negative AML error code; on failure all mapped
 *         replicas are unmapped and the descriptor freed.
 */
int aml_replicaset_hwloc_create(struct aml_replicaset **out,
                                const size_t size,
                                const hwloc_obj_type_t initiator_type,
                                const enum hwloc_distances_kind_e kind)
{
	int err = -AML_FAILURE;
	struct aml_replicaset *replicaset = NULL;
	struct aml_replicaset_hwloc_data *data = NULL;
	struct aml_area *area = NULL;
	struct aml_area_hwloc_preferred_data *area_data = NULL;
	const unsigned int n_numa =
	        hwloc_get_nbobjs_by_type(aml_topology, HWLOC_OBJ_NUMANODE);
	hwloc_obj_t targets[n_numa]; // NUMA node backing each replica slot

	err = aml_replicaset_hwloc_alloc(&replicaset, initiator_type);
	if (err != AML_SUCCESS)
		return err;
	replicaset->size = size;
	data = (struct aml_replicaset_hwloc_data *)replicaset->data;

	// For each initiator allocate replica on preferred area
	for (hwloc_obj_t initiator = hwloc_get_obj_by_type(
	             aml_topology, initiator_type, 0);
	     initiator != NULL; initiator = initiator->next_cousin) {

		// Get preferred area.
		err = aml_area_hwloc_preferred_create(&area, initiator, kind);
		if (err != AML_SUCCESS)
			goto err_with_replicaset;
		area_data =
		        (struct aml_area_hwloc_preferred_data *)area->data;

		// Search if preferred numa node is already a target:
		// if so, share that replica instead of mapping a new one.
		for (unsigned i = 0; i < replicaset->n; i++) {
			if (targets[i] == area_data->numanodes[0]) {
				data->ptr[initiator->logical_index] =
				        replicaset->replica[i];
				goto next;
			}
		}

		// Preferred numa node is not a target yet.
		void *ptr = aml_area_mmap(area, size, NULL);
		if (ptr == NULL) {
			err = -AML_ENOMEM;
			goto err_with_replicas;
		}
		replicaset->replica[replicaset->n] = ptr;
		data->ptr[initiator->logical_index] = ptr;
		targets[replicaset->n] = area_data->numanodes[0];
		replicaset->n++;
	next:
		// Area cleanup
		aml_area_hwloc_preferred_destroy(&area);
	}

	// Success
	*out = replicaset;
	return AML_SUCCESS;

	// Failure
err_with_replicas:
	for (unsigned i = 0; i < replicaset->n; i++)
		munmap(replicaset->replica[i], size);
err_with_replicaset:
	free(replicaset);
	return err;
}

/**
 * Unmap every replica and free the descriptor; *replicaset is set to NULL.
 * Safe to call with NULL or *NULL (no-op).
 */
void aml_replicaset_hwloc_destroy(struct aml_replicaset **replicaset)
{
	if (replicaset == NULL || *replicaset == NULL)
		return;
	for (unsigned int i = 0; i < (*replicaset)->n; i++)
		munmap((*replicaset)->replica[i], (*replicaset)->size);
	free(*replicaset);
	*replicaset = NULL;
}

/**
 * Fill every replica with `size` bytes copied from `data`
 * (replicas are copied in parallel when OpenMP is available).
 */
int aml_replicaset_hwloc_init(struct aml_replicaset *replicaset,
                              const void *data)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (unsigned i = 0; i < replicaset->n; i++)
		memcpy(replicaset->replica[i], data, replicaset->size);
	return AML_SUCCESS;
}

/**
 * Propagate the contents of replica `id` to all other replicas
 * (in parallel when OpenMP is available).
 */
int aml_replicaset_hwloc_sync(struct aml_replicaset *replicaset,
                              const unsigned int id)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (unsigned i = 0; i < replicaset->n; i++)
		if (i != id)
			memcpy(replicaset->replica[i],
			       replicaset->replica[id], replicaset->size);
	return AML_SUCCESS;
}

// See src/area/hwloc.c
int aml_hwloc_local_initiator(hwloc_obj_t *out);

/**
 * Return the replica assigned to the calling thread's location.
 *
 * Starting from the thread's local topology object, climbs toward the root
 * until the object count at that depth is <= num_ptr; the replica is then
 * looked up by logical index. Returns NULL if no depth matches num_ptr
 * exactly or the local initiator cannot be determined.
 *
 * NOTE(review): the `>` / `<` comparisons mix hwloc's int count with
 * data->num_ptr (type declared elsewhere) — presumably both fit in int;
 * confirm signedness in the struct definition.
 */
void *aml_replicaset_hwloc_local_replica(struct aml_replicaset *replicaset)
{
	int err;
	hwloc_obj_t initiator;
	struct aml_replicaset_hwloc_data *data = NULL;

	data = (struct aml_replicaset_hwloc_data *)replicaset->data;
	err = aml_hwloc_local_initiator(&initiator);
	if (err != AML_SUCCESS)
		return NULL;

	// Climb until this depth's object count fits the ptr table.
	while (initiator != NULL &&
	       hwloc_get_nbobjs_by_depth(aml_topology, initiator->depth) >
	               data->num_ptr)
		initiator = initiator->parent;
	if (initiator == NULL)
		return NULL;
	// Require an exact match: fewer objects than slots means the table
	// was built for a different depth.
	if (hwloc_get_nbobjs_by_depth(aml_topology, initiator->depth) <
	    data->num_ptr)
		return NULL;

	return data->ptr[initiator->logical_index];
}

// Ops table plugged into each replicaset by aml_replicaset_hwloc_alloc().
struct aml_replicaset_ops aml_replicaset_hwloc_ops = {
        .init = aml_replicaset_hwloc_init,
        .sync = aml_replicaset_hwloc_sync,
};
is.c
/************************************************************************* * * * N A S P A R A L L E L B E N C H M A R K S 3.3 * * * * O p e n M P V E R S I O N * * * * I S * * * ************************************************************************* * * * This benchmark is an OpenMP version of the NPB IS code. * * It is described in NAS Technical Report 99-011. * * * * Permission to use, copy, distribute and modify this software * * for any purpose with or without fee is hereby granted. We * * request, however, that all derived work reference the NAS * * Parallel Benchmarks 3.3. This software is provided "as is" * * without express or implied warranty. * * * * Information on NPB 3.3, including the technical report, the * * original specifications, source code, results and information * * on how to submit new results, is available at: * * * * http://www.nas.nasa.gov/Software/NPB/ * * * * Send comments or suggestions to npb@nas.nasa.gov * * * * NAS Parallel Benchmarks Group * * NASA Ames Research Center * * Mail Stop: T27A-1 * * Moffett Field, CA 94035-1000 * * * * E-mail: npb@nas.nasa.gov * * Fax: (650) 604-3957 * * * ************************************************************************* * * * Author: M. Yarrow * * H. Jin * * * *************************************************************************/ #include "npbparams.h" #include <stdlib.h> #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "sim_api.h" /*****************************************************************/ /* For serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. 
*/ /* (Note: Mechanism not understood, probably cache related) */ /* Example: SP2-66MhzWN: 50% speedup with buckets */ /* Example: SGI Indy5000: 50% slowdown with buckets */ /* Example: SGI O2000: 400% slowdown with buckets (Wow!) */ /*****************************************************************/ /* To disable the use of buckets, comment out the following line */ #define USE_BUCKETS /* Uncomment below for cyclic schedule */ /*#define SCHED_CYCLIC*/ /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 16 #define MAX_KEY_LOG_2 11 #define NUM_BUCKETS_LOG_2 9 #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 20 #define MAX_KEY_LOG_2 16 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 23 #define MAX_KEY_LOG_2 19 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 25 #define MAX_KEY_LOG_2 21 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 27 #define MAX_KEY_LOG_2 23 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS D */ /*************/ #if CLASS == 'D' #define TOTAL_KEYS_LOG_2 31 #define MAX_KEY_LOG_2 27 #define NUM_BUCKETS_LOG_2 10 #endif #if CLASS == 'D' #define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2) #else #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #endif #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS TOTAL_KEYS #define SIZE_OF_BUFFERS NUM_KEYS #define MAX_ITERATIONS 10 #define TEST_ARRAY_SIZE 5 /*************************************/ /* Typedef: if necessary, change the */ /* size of int here by changing the */ /* int type to, say, long */ /*************************************/ #if CLASS == 'D' typedef long 
INT_TYPE; #else typedef int INT_TYPE; #endif /********************/ /* Some global info */ /********************/ INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */ /* copies of rank info */ int passed_verification; /************************************/ /* These are the three main arrays. */ /* See SIZE_OF_BUFFERS def above */ /************************************/ INT_TYPE key_array[SIZE_OF_BUFFERS], key_buff1[MAX_KEY], key_buff2[SIZE_OF_BUFFERS], partial_verify_vals[TEST_ARRAY_SIZE], **key_buff1_aptr = NULL; #ifdef USE_BUCKETS INT_TYPE **bucket_size, bucket_ptrs[NUM_BUCKETS]; #pragma omp threadprivate(bucket_ptrs) #endif /**********************/ /* Partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}, D_test_index_array[TEST_ARRAY_SIZE] = {1317351170,995930646,1157283250,1503301535,1453734525}, D_test_rank_array[TEST_ARRAY_SIZE] = {1,36538729,1978098519,2145192618,2147425337}; /***********************/ /* function prototypes */ /***********************/ double randlc( double *X, double *A ); void full_verify( void ); void c_print_results( char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int 
passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags ); void timer_clear( int n ); void timer_start( int n ); void timer_stop( int n ); double timer_read( int n ); /* * FUNCTION RANDLC (X, A) * * This routine returns a uniform pseudorandom double precision number in the * range (0, 1) by using the linear congruential generator * * x_{k+1} = a x_k (mod 2^46) * * where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers * before repeating. The argument A is the same as 'a' in the above formula, * and X is the same as x_0. A and X must be odd double precision integers * in the range (1, 2^46). The returned value RANDLC is normalized to be * between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain * the new seed x_1, so that subsequent calls to RANDLC using the same * arguments will generate a continuous sequence. * * This routine should produce the same results on any computer with at least * 48 mantissa bits in double precision floating point data. On Cray systems, * double precision should be disabled. * * David H. Bailey October 26, 1990 * * IMPLICIT DOUBLE PRECISION (A-H, O-Z) * SAVE KS, R23, R46, T23, T46 * DATA KS/0/ * * If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46, * T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than * by merely using the ** operator, in order to insure that the results are * exact on all systems. This code assumes that 0.5D0 is represented exactly. 
*/ /*****************************************************************/ /************* R A N D L C ************/ /************* ************/ /************* portable random number generator ************/ /*****************************************************************/ static int KS=0; static double R23, R46, T23, T46; #pragma omp threadprivate(KS, R23, R46, T23, T46) double randlc( double *X, double *A ) { double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; int i, j; if (KS == 0) { R23 = 1.0; R46 = 1.0; T23 = 1.0; T46 = 1.0; for (i=1; i<=23; i++) { R23 = 0.50 * R23; T23 = 2.0 * T23; } for (i=1; i<=46; i++) { R46 = 0.50 * R46; T46 = 2.0 * T46; } KS = 1; } /* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */ T1 = R23 * *A; j = T1; A1 = j; A2 = *A - T23 * A1; /* Break X into two parts such that X = 2^23 * X1 + X2, compute Z = A1 * X2 + A2 * X1 (mod 2^23), and then X = 2^23 * Z + A2 * X2 (mod 2^46). */ T1 = R23 * *X; j = T1; X1 = j; X2 = *X - T23 * X1; T1 = A1 * X2 + A2 * X1; j = R23 * T1; T2 = j; Z = T1 - T23 * T2; T3 = T23 * Z + A2 * X2; j = R46 * T3; T4 = j; *X = T3 - T46 * T4; return(R46 * *X); } /*****************************************************************/ /************ F I N D _ M Y _ S E E D ************/ /************ ************/ /************ returns parallel random number seq seed ************/ /*****************************************************************/ /* * Create a random number sequence of total length nn residing * on np number of processors. Each processor will therefore have a * subsequence of length nn/np. This routine returns that random * number which is the first random number for the subsequence belonging * to processor rank kn, and which is used as seed for proc kn ran # gen. 
*/ double find_my_seed( int kn, /* my processor rank, 0<=kn<=num procs */ int np, /* np = num procs */ long nn, /* total num of ran numbers, all procs */ double s, /* Ran num seed, for ex.: 314159265.00 */ double a ) /* Ran num gen mult, try 1220703125.00 */ { double t1,t2; long mq,nq,kk,ik; if ( kn == 0 ) return s; mq = (nn/4 + np - 1) / np; nq = mq * 4 * kn; /* number of rans to be skipped */ t1 = s; t2 = a; kk = nq; while ( kk > 1 ) { ik = kk / 2; if( 2 * ik == kk ) { (void)randlc( &t2, &t2 ); kk = ik; } else { (void)randlc( &t1, &t2 ); kk = kk - 1; } } (void)randlc( &t1, &t2 ); return( t1 ); } /*****************************************************************/ /************* C R E A T E _ S E Q ************/ /*****************************************************************/ void create_seq( double seed, double a ) { double x, s; INT_TYPE i, k; #pragma omp parallel private(x,s,i,k) { INT_TYPE k1, k2; double an = a; int myid, num_procs; INT_TYPE mq; #ifdef _OPENMP myid = omp_get_thread_num(); num_procs = omp_get_num_threads(); #else myid = 0; num_procs = 1; #endif mq = (NUM_KEYS + num_procs - 1) / num_procs; k1 = mq * myid; k2 = k1 + mq; if ( k2 > NUM_KEYS ) k2 = NUM_KEYS; KS = 0; s = find_my_seed( myid, num_procs, (long)4*NUM_KEYS, seed, an ); k = MAX_KEY/4; for (i=k1; i<k2; i++) { x = randlc(&s, &an); x += randlc(&s, &an); x += randlc(&s, &an); x += randlc(&s, &an); key_array[i] = k*x; } } /*omp parallel*/ } /*****************************************************************/ /***************** Allocate Working Buffer ****************/ /*****************************************************************/ void *alloc_mem( size_t size ) { void *p; p = (void *)malloc(size); if (!p) { perror("Memory allocation error"); exit(1); } return p; } void alloc_key_buff( void ) { INT_TYPE i; int num_procs; #ifdef _OPENMP num_procs = omp_get_max_threads(); #else num_procs = 1; #endif #ifdef USE_BUCKETS bucket_size = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs); for 
(i = 0; i < num_procs; i++) { bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS); } #pragma omp parallel for for( i=0; i<NUM_KEYS; i++ ) key_buff2[i] = 0; #else /*USE_BUCKETS*/ key_buff1_aptr = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs); key_buff1_aptr[0] = key_buff1; for (i = 1; i < num_procs; i++) { key_buff1_aptr[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * MAX_KEY); } #endif /*USE_BUCKETS*/ } /*****************************************************************/ /************* F U L L _ V E R I F Y ************/ /*****************************************************************/ void full_verify( void ) { INT_TYPE i, j; INT_TYPE k, k1, k2; /* Now, finally, sort the keys: */ /* Copy keys into work array; keys in key_array will be reassigned. */ #ifdef USE_BUCKETS /* Buckets are already sorted. Sorting keys within each bucket */ #ifdef SCHED_CYCLIC #pragma omp parallel for private(i,j,k,k1) schedule(static,1) #else #pragma omp parallel for private(i,j,k,k1) schedule(dynamic) #endif for( j=0; j< NUM_BUCKETS; j++ ) { k1 = (j > 0)? bucket_ptrs[j-1] : 0; for ( i = k1; i < bucket_ptrs[j]; i++ ) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } } #else #pragma omp parallel private(i,j,k,k1,k2) { #pragma omp for for( i=0; i<NUM_KEYS; i++ ) key_buff2[i] = key_array[i]; /* This is actual sorting. 
Each thread is responsible for a subset of key values */ j = omp_get_num_threads(); j = (MAX_KEY + j - 1) / j; k1 = j * omp_get_thread_num(); k2 = k1 + j; if (k2 > MAX_KEY) k2 = MAX_KEY; for( i=0; i<NUM_KEYS; i++ ) { if (key_buff2[i] >= k1 && key_buff2[i] < k2) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } } } /*omp parallel*/ #endif /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */ j = 0; #pragma omp parallel for reduction(+:j) for( i=1; i<NUM_KEYS; i++ ) if( key_array[i-1] > key_array[i] ) j++; if( j != 0 ) printf( "Full_verify: number of keys out of sort: %ld\n", (long)j ); else passed_verification++; } /*****************************************************************/ /************* R A N K ****************/ /*****************************************************************/ void rank( int iteration ) { INT_TYPE i, k; INT_TYPE *key_buff_ptr, *key_buff_ptr2; #ifdef USE_BUCKETS int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2; INT_TYPE num_bucket_keys = (1L << shift); #endif key_array[iteration] = iteration; key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration; /* Determine where the partial verify test keys are, load into */ /* top of array bucket_size */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) partial_verify_vals[i] = key_array[test_index_array[i]]; /* Setup pointers to key buffers */ #ifdef USE_BUCKETS key_buff_ptr2 = key_buff2; #else key_buff_ptr2 = key_array; #endif key_buff_ptr = key_buff1; #pragma omp parallel private(i, k) { INT_TYPE *work_buff, m, k1, k2; int myid = 0, num_procs = 1; #ifdef _OPENMP myid = omp_get_thread_num(); num_procs = omp_get_num_threads(); #endif /* Bucket sort is known to improve cache performance on some */ /* cache based systems. But the actual performance may depend */ /* on cache size, problem size. 
*/ #ifdef USE_BUCKETS work_buff = bucket_size[myid]; /* Initialize */ for( i=0; i<NUM_BUCKETS; i++ ) work_buff[i] = 0; /* Determine the number of keys in each bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) work_buff[key_array[i] >> shift]++; /* Accumulative bucket sizes are the bucket pointers. These are global sizes accumulated upon to each bucket */ bucket_ptrs[0] = 0; for( k=0; k< myid; k++ ) bucket_ptrs[0] += bucket_size[k][0]; for( i=1; i< NUM_BUCKETS; i++ ) { bucket_ptrs[i] = bucket_ptrs[i-1]; for( k=0; k< myid; k++ ) bucket_ptrs[i] += bucket_size[k][i]; for( k=myid; k< num_procs; k++ ) bucket_ptrs[i] += bucket_size[k][i-1]; } /* Sort into appropriate bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) { k = key_array[i]; key_buff2[bucket_ptrs[k >> shift]++] = k; } /* The bucket pointers now point to the final accumulated sizes */ if (myid < num_procs-1) { for( i=0; i< NUM_BUCKETS; i++ ) for( k=myid+1; k< num_procs; k++ ) bucket_ptrs[i] += bucket_size[k][i]; } /* Now, buckets are sorted. We only need to sort keys inside each bucket, which can be done in parallel. Because the distribution of the number of keys in the buckets is Gaussian, the use of a dynamic schedule should improve load balance, thus, performance */ #ifdef SCHED_CYCLIC #pragma omp for schedule(static,1) #else #pragma omp for schedule(dynamic) #endif for( i=0; i< NUM_BUCKETS; i++ ) { /* Clear the work array section associated with each bucket */ k1 = i * num_bucket_keys; k2 = k1 + num_bucket_keys; for ( k = k1; k < k2; k++ ) key_buff_ptr[k] = 0; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ m = (i > 0)? 
bucket_ptrs[i-1] : 0; for ( k = m; k < bucket_ptrs[i]; k++ ) key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */ /* population */ /* To obtain ranks of each key, successively add the individual key population, not forgetting to add m, the total of lesser keys, to the first key population */ key_buff_ptr[k1] += m; for ( k = k1+1; k < k2; k++ ) key_buff_ptr[k] += key_buff_ptr[k-1]; } #else /*USE_BUCKETS*/ work_buff = key_buff1_aptr[myid]; /* Clear the work array */ for( i=0; i<MAX_KEY; i++ ) work_buff[i] = 0; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ #pragma omp for nowait schedule(static) for( i=0; i<NUM_KEYS; i++ ) work_buff[key_buff_ptr2[i]]++; /* Now they have individual key */ /* population */ /* To obtain ranks of each key, successively add the individual key population */ for( i=0; i<MAX_KEY-1; i++ ) work_buff[i+1] += work_buff[i]; #pragma omp barrier /* Accumulate the global key population */ for( k=1; k<num_procs; k++ ) { #pragma omp for nowait schedule(static) for( i=0; i<MAX_KEY; i++ ) key_buff_ptr[i] += key_buff1_aptr[k][i]; } #endif /*USE_BUCKETS*/ } /*omp parallel*/ /* This is the partial verify test section */ /* Observe that test_rank_array vals are */ /* shifted differently for different cases */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) { k = partial_verify_vals[i]; /* test vals were put here */ if( 0 < k && k <= NUM_KEYS-1 ) { INT_TYPE key_rank = key_buff_ptr[k-1]; int failed = 0; switch( CLASS ) { case 'S': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'W': if( i < 2 ) { if( key_rank != test_rank_array[i]+(iteration-2) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; 
else passed_verification++; } break; case 'A': if( i <= 2 ) { if( key_rank != test_rank_array[i]+(iteration-1) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-(iteration-1) ) failed = 1; else passed_verification++; } break; case 'B': if( i == 1 || i == 2 || i == 4 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'C': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'D': if( i < 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; } if( failed == 1 ) printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, (int)i ); } } /* Make copies of rank info for use by full_verify: these variables in rank are local; making them global slows down the code, probably since they cannot be made register by compiler */ if( iteration == MAX_ITERATIONS ) key_buff_ptr_global = key_buff_ptr; } /*****************************************************************/ /************* M A I N ****************/ /*****************************************************************/ int main( int argc, char **argv ) { int i, iteration, timer_on; double timecounter; FILE *fp; /* Initialize timers */ timer_on = 0; if ((fp = fopen("timer.flag", "r")) != NULL) { fclose(fp); timer_on = 1; } timer_clear( 0 ); if (timer_on) { timer_clear( 1 ); timer_clear( 2 ); timer_clear( 3 ); } if (timer_on) timer_start( 3 ); /* Initialize the verification arrays if a valid class */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) switch( CLASS ) { case 'S': test_index_array[i] = S_test_index_array[i]; test_rank_array[i] = 
S_test_rank_array[i]; break; case 'A': test_index_array[i] = A_test_index_array[i]; test_rank_array[i] = A_test_rank_array[i]; break; case 'W': test_index_array[i] = W_test_index_array[i]; test_rank_array[i] = W_test_rank_array[i]; break; case 'B': test_index_array[i] = B_test_index_array[i]; test_rank_array[i] = B_test_rank_array[i]; break; case 'C': test_index_array[i] = C_test_index_array[i]; test_rank_array[i] = C_test_rank_array[i]; break; case 'D': test_index_array[i] = D_test_index_array[i]; test_rank_array[i] = D_test_rank_array[i]; break; }; /* Printout initial NPB info */ printf ( "\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - IS Benchmark\n\n" ); printf( " Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS ); printf( " Iterations: %d\n", MAX_ITERATIONS ); #ifdef _OPENMP printf( " Number of available threads: %d\n", omp_get_max_threads() ); #endif printf( "\n" ); if (timer_on) timer_start( 1 ); /* Generate random number sequence and subsequent keys on all procs */ create_seq( 314159265.00, /* Random number gen seed */ 1220703125.00 ); /* Random number gen mult */ alloc_key_buff(); if (timer_on) timer_stop( 1 ); /* Do one interation for free (i.e., untimed) to guarantee initialization of all data and code pages and respective tables */ rank( 1 ); /* Start verification counter */ passed_verification = 0; if( CLASS != 'S' ) printf( "\n iteration\n" ); /* Start timer */ timer_start( 0 ); parmacs_roi_begin(); /* This is the main iteration */ for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ ) { if( CLASS != 'S' ) printf( " %d\n", iteration ); parmacs_iter_begin(iteration); rank( iteration ); parmacs_iter_end(iteration); } /* End of timing, obtain maximum time of all processors */ parmacs_roi_end(); timer_stop( 0 ); timecounter = timer_read( 0 ); /* This tests that keys are in sequence: sorting of last ranked key seq occurs here, but is an untimed operation */ if (timer_on) timer_start( 2 ); full_verify(); if (timer_on) timer_stop( 2 ); if (timer_on) 
timer_stop( 3 ); /* The final printout */ if( passed_verification != 5*MAX_ITERATIONS + 1 ) passed_verification = 0; c_print_results( "IS", CLASS, (int)(TOTAL_KEYS/64), 64, 0, MAX_ITERATIONS, timecounter, ((double) (MAX_ITERATIONS*TOTAL_KEYS)) /timecounter/1000000., "keys ranked", passed_verification, NPBVERSION, COMPILETIME, CC, CLINK, C_LIB, C_INC, CFLAGS, CLINKFLAGS ); /* Print additional timers */ if (timer_on) { double t_total, t_percent; t_total = timer_read( 3 ); printf("\nAdditional timers -\n"); printf(" Total execution: %8.3f\n", t_total); if (t_total == 0.0) t_total = 1.0; timecounter = timer_read(1); t_percent = timecounter/t_total * 100.; printf(" Initialization : %8.3f (%5.2f%%)\n", timecounter, t_percent); timecounter = timer_read(0); t_percent = timecounter/t_total * 100.; printf(" Benchmarking : %8.3f (%5.2f%%)\n", timecounter, t_percent); timecounter = timer_read(2); t_percent = timecounter/t_total * 100.; printf(" Sorting : %8.3f (%5.2f%%)\n", timecounter, t_percent); } return 0; /**************************/ } /* E N D P R O G R A M */ /**************************/
NLmean_propag2dirs_sspacing3_tspacing6_sim12_acc12_neighbor5_tau0100.c
/* * compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++ * in the terminal: export OMP_NUM_THREADS=3 */ #include<stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <netcdf.h> #include <omp.h> /* This is the name of the data file we will read. */ #define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing3.nc" #define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag2dirs_sspacing3_tspacing6_sim12_acc12_neighbor5_tau0100.nc" /* all constants */ #define N_HR 96 #define SCALE_FACTOR_SPACE 3 #define SCALE_FACTOR_TIME 6 #define SIM_HAFTSIZE 12 #define ACC_HAFTSIZE 12 #define NEIGHBOR_HAFTSIZE 5 #define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1) #define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1) #define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1) #define TAU 0.1 #define NUM_VARS 1 #define NUM_SCALES 2 #define NUM_3DSNAPS 37 /* #3D snapshots */ #define NUM_BLOCKS N_HR/SCALE_FACTOR_TIME - 1 /* #(1:SCALE_FACTOR_TIME:N_HR) - 1*/ #define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */ #define NDIMS 4 /* Handle errors by printing an error message and exiting with a non-zero status. 
*/ #define ERRCODE 2 #define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);} /* **********************************************************************************/ /* ****************************** USEFUL FUNCTIONS **********************************/ /* **********************************************************************************/ /* * get_onesnap: take part of a big array(arr1) and put to small one (arr2): arr2 = arr1[id_start:id_end] */ void get_onesnap(double *arr1,double *arr2, int id_start, int id_end) { for (int i = id_start; i < id_end + 1; i++) arr2[i - id_start] = arr1[i]; } /* * put_onesnap: assign small array (arr2) into biger one (arr1): arr1[id_start:id_end] = arr2 */ void put_onesnap(double *arr1,double *arr2, int id_start, int id_end) { for (int i = id_start; i < id_end + 1; i++) arr1[i] = arr2[i - id_start]; } /* * norm_by_weight: normalize x[dim] by weight W[dim] */ void norm_by_weight(int dim, double *x, double *W) { for (int k = 0; k < dim; k++) x[k] = x[k]/W[k]; } void add_mat(int dim, double *sum, double *x1, double *x2) { for (int k = 0; k < dim; k++) sum[k] = x1[k] + x2[k]; } void initialize(int dim, double *x, double val) { for (int k = 0; k < dim; k++) x[k] = val; } /* **********************************************************************************/ /* ****************************** NETCDF UTILS **************************************/ /* **********************************************************************************/ /* * creat_netcdf: create the netcdf file [filename] contain [num_vars] variables * variable names are [varname] */ void create_netcdf(char *filename, int num_vars, char *varname[num_vars]) { int ncid_wr, retval_wr; int vel_varid_wr; int Nt, Nx, Ny, Nz; int dimids[NDIMS]; /* Create the file. */ if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr))) ERR(retval_wr); /* Define the dimensions. 
The record dimension is defined to have * unlimited length - it can grow as needed.*/ if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny))) ERR(retval_wr); if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz))) ERR(retval_wr); if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt))) ERR(retval_wr); /* Define the netCDF variables for the data. */ dimids[0] = Nt; dimids[1] = Nx; dimids[2] = Ny; dimids[3] = Nz; for (int i = 0; i<num_vars; i++) { if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr))) ERR(retval_wr); } /* End define mode (SHOULD NOT FORGET THIS!). */ if ((retval_wr = nc_enddef(ncid_wr))) ERR(retval_wr); /* Close the file. */ if ((retval_wr = nc_close(ncid_wr))) ERR(retval_wr); printf("\n *** SUCCESS creating file: %s!\n", filename); } /* * write_netcdf: * write into [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps] started at [snap_start] */ void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps) { int ncid_wr, retval_wr; int vel_varid_wr; /* Open the file. NC_WRITE tells netCDF we want read-only access to the file.*/ if ((retval_wr = nc_open(filename, NC_WRITE, &ncid_wr))) ERR(retval_wr); /* Get variable*/ if ((retval_wr = nc_inq_varid(ncid_wr, varname, &vel_varid_wr))) ERR(retval_wr);; /* Put variable*/ if ((retval_wr = nc_put_vara_double(ncid_wr, vel_varid_wr, start, count, &snaps[0]))) ERR(retval_wr); /* Close the file. */ if ((retval_wr = nc_close(ncid_wr))) ERR(retval_wr); printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename); } /* * read_netcdf: read from [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps] * started at [snap_start] */ void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps) { int ncid_rd, retval_rd; int vel_varid_rd; /* ******** PREPARE TO READ ************* */ /* Open the file. 
NC_NOWRITE tells netCDF we want read-only access to the file.*/ if ((retval_rd = nc_open(filename, NC_NOWRITE, &ncid_rd))) ERR(retval_rd); /* Get the varids of the velocity in netCDF */ if ((retval_rd = nc_inq_varid(ncid_rd, varname, &vel_varid_rd))) ERR(retval_rd); if ((retval_rd = nc_get_vara_double(ncid_rd, vel_varid_rd, start, count, &snaps[0]))) ERR(retval_rd); /* Close the file, freeing all resources. */ if ((retval_rd = nc_close(ncid_rd))) ERR(retval_rd); printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename); } /* **********************************************************************************/ /* ****************************** ESTIMATE_DISTANCE *********************************/ /* **********************************************************************************/ /* * estimate_distance: estimate the distances between ref patch and moving patches (prev and after) * patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1) * reference patch are centered at [center_ref_idy, center_ref_idz] * moving patches are centered at [center_moving_idy, center_moving_idz] * dist_all contain 2 elements: distances to moving patches in the prev and after plane * x_ref: reference plane * x_prev: previous plane * x_after: plane after * ref_ids_y(z): indices of points in reference patch * moving_ids_y(z): indices of points in moving patch */ void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids) { int neighbor_id, sim_id; int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE]; for (int m = 0; m < NEIGHBOR_FULLSIZE; m++) { for (int n = 0; n < NEIGHBOR_FULLSIZE; n++) { gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE; gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE; } } int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE]; for (int p = 0; p < SIM_FULLSIZE; p++) { 
for (int q = 0; q < SIM_FULLSIZE; q++) { gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE; gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE; } } int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE]; for (int p = 0; p < SIM_FULLSIZE; p++) for (int q = 0; q < SIM_FULLSIZE; q++) grid_sim[p][q] = p * SIM_FULLSIZE + q; for (int p = 0; p < ACC_FULLSIZE; p++) for (int q = 0; q < ACC_FULLSIZE; q++) acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q]; int valy, valz; long int grid_id; for (int i = 0; i < N_HR; i++) { for (int j = 0; j < N_HR; j++) { for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++) { for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++) { grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id; valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id]; valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id]; if (valy < 0) gridpatches_y[grid_id] = (N_HR - 1) + valy; else if (valy > (N_HR - 1)) gridpatches_y[grid_id] = valy - (N_HR - 1); else gridpatches_y[grid_id] = valy; if (valz < 0) gridpatches_z[grid_id] = (N_HR - 1) + valz; else if (valz > (N_HR - 1)) gridpatches_z[grid_id] = valz - (N_HR - 1); else gridpatches_z[grid_id] = valz; } } } } //printf("\n gridpatches_z: %i \n", gridpatches_y[0]); } /* **********************************************************************************/ /* ****************************** NLMEAN *********************************/ /* **********************************************************************************/ /* * estimate_distance: estimate the distances between ref patch and moving patches (prev and after) * patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1) * reference patch are centered at 
[center_ref_idy, center_ref_idz] * moving patches are centered at [center_moving_idy, center_moving_idz] * dist_all contain 2 elements: distances to moving patches in the prev and after plane * x_ref: reference plane * x_prev: previous plane * x_after: plane after * ref_ids_y(z): indices of points in reference patch * moving_ids_y(z): indices of points in moving patch */ /*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE], int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE], int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/ void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids) { double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE)); int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE; int est_idy; #pragma omp parallel for private (est_idy) for (est_idy = 0; est_idy < N_HR; est_idy++) for (int est_idz = 0; est_idz < N_HR; est_idz++) for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++) { int ref_idy, ref_idz, moving_idy, moving_idz; double du; double d = 0.0; long int grid_rid, grid_nid; for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++) { grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ; grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si; ref_idy = gridy[grid_rid]; moving_idy = gridy[grid_nid]; ref_idz = gridz[grid_rid]; moving_idz = gridz[grid_nid]; //compute distance btw reference patch and 
fusion patch du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz]; d = d + norm_fact*du*du; } double w = exp(-d/(2.0*TAU*TAU)); for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++) { int ai = accids[k]; grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ; grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai; ref_idy = gridy[grid_rid]; moving_idy = gridy[grid_nid]; ref_idz = gridz[grid_rid]; moving_idz = gridz[grid_nid]; x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz]; weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w; } //printf("\n w=%f\n ",w); } } void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset) { for (int t_est = t_first + 1; t_est <= t_bound1; t_est++) { int t_prev = t_est - 1; double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR]; get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); //Initialize with zeros initialize(N_HR * N_HR, xref_hf, 0.0); initialize(N_HR * N_HR, w, 0.0); // Propagation from previous planes NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref_hf, w); put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 
1); } } void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset) { for (int t_est = t_last - 1; t_est >= t_bound2; --t_est) { int t_prev = t_est + 1; double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR]; get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); //Initialize with zeros initialize(N_HR * N_HR, xref_hf, 0.0); initialize(N_HR * N_HR, w, 0.0); // Propagation from previous planes NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref_hf, w); put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1); } } void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset) { double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR]; int t_prev = t_mid - 1; int t_after = t_mid + 1; //Initialize with zeros initialize(N_HR * N_HR, xref_hf, 0.0); initialize(N_HR * N_HR, w, 0.0); get_onesnap(Xlf, xref_lf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1); NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids); get_onesnap(Xlf, xmov_lf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1); NLmean(xref_hf, w, xref_lf, 
xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref_hf, w); put_onesnap(Xrec, xref_hf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1); } void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset) { double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR]; double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR]; int tc = (int)SCALE_FACTOR_TIME/2; if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; } for (int td = 1; td < tc; td++) { int t1 = t_first + td; // bound on left side int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side // Initialize with zeros initialize(N_HR * N_HR, xref1_hf, 0.0); initialize(N_HR * N_HR, w1, 0.0); initialize(N_HR * N_HR, xref2_hf, 0.0); initialize(N_HR * N_HR, w2, 0.0); get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1); //Propagate from left bound get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids); //Propagate from right bound get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids); // Normalize and put back norm_by_weight(N_HR*N_HR, xref1_hf, w1); put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + 
(t1 + 1) * N_HR * N_HR - 1); norm_by_weight(N_HR*N_HR, xref2_hf, w2); put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1); } // Last plane in the center if (SCALE_FACTOR_TIME % 2 == 0) { initialize(N_HR * N_HR, xref1_hf, 0.0); initialize(N_HR * N_HR, w1, 0.0); get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1); get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1); get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1); NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids); norm_by_weight(N_HR*N_HR, xref1_hf, w1); put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1); } } /* **********************************************************************************/ /* ********************************** MAIN FUNCTION *********************************/ /* **********************************************************************************/ int main() { /* Creat the file to save results */ char *varnames[NUM_VARS] = {"x_rec_all"}; create_netcdf(FILENAME_WR, NUM_VARS, varnames); /* Allocate memory */ double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double)); double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double)); double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double)); /* read all snapshots */ size_t start_ids[4] = {0, 0, 0, 0}; size_t 
count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR }; read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all); read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all); double time_all_start = omp_get_wtime(); double *x_current_lf = (double*)malloc(N_HR * N_HR * sizeof(double)); double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double)); double *x_rec = (double*)malloc(N_HR * N_HR * sizeof(double)); long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE; int *gridpatches_y = (int*)malloc(grid_size * sizeof(int)); int *gridpatches_z = (int*)malloc(grid_size * sizeof(int)); int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int)); generate_grids(gridpatches_y, gridpatches_z, acc_ids); for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++) { int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR; // put first PIV get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1); put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1); int block_id; for(block_id = 0; block_id < NUM_BLOCKS; block_id++) { double time_start = omp_get_wtime(); int t_first = SCALE_FACTOR_TIME*block_id; int t_last = SCALE_FACTOR_TIME*(block_id+1); // Put last PIV of the block get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1); put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1); propag_towardcenter(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_offset); printf("\n Estimated block %i (total 23) in 3D snapshot %i (total 37) in %f seconds \n", block_id, snap3d_id, (double)omp_get_wtime() - time_start); } } // Write to file write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all); /* free memory */ free(x_rec); free(x_current_lf); 
free(x_current_hf); free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all); free(gridpatches_y); free(gridpatches_z); free(acc_ids); printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start); return 1; }
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the OpenMPIRBuilder class and helpers used as a convenient // way to create LLVM instructions for OpenMP directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Allocator.h" #include <forward_list> namespace llvm { class CanonicalLoopInfo; /// Move the instruction after an InsertPoint to the beginning of another /// BasicBlock. /// /// The instructions after \p IP are moved to the beginning of \p New which must /// not have any PHINodes. If \p CreateBranch is true, a branch instruction to /// \p New will be added such that there is no semantic change. Otherwise, the /// \p IP insert block remains degenerate and it is up to the caller to insert a /// terminator. void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New, bool CreateBranch); /// Splice a BasicBlock at an IRBuilder's current insertion point. Its new /// insert location will stick to after the instruction before the insertion /// point (instead of moving with the instruction the InsertPoint stores /// internally). void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch); /// Split a BasicBlock at an InsertPoint, even if the block is degenerate /// (missing the terminator). /// /// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed /// BasicBlock. \p Name is used for the new successor block. 
If \p CreateBranch /// is true, a branch to the new successor will new created such that /// semantically there is no change; otherwise the block of the insertion point /// remains degenerate and it is the caller's responsibility to insert a /// terminator. Returns the new successor block. BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch, llvm::Twine Name = {}); /// Split a BasicBlock at \p Builder's insertion point, even if the block is /// degenerate (missing the terminator). Its new insert location will stick to /// after the instruction before the insertion point (instead of moving with the /// instruction the InsertPoint stores internally). BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch, llvm::Twine Name = {}); /// Split a BasicBlock at \p Builder's insertion point, even if the block is /// degenerate (missing the terminator). Its new insert location will stick to /// after the instruction before the insertion point (instead of moving with the /// instruction the InsertPoint stores internally). BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name); /// Like splitBB, but reuses the current block's name for the new name. BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch, llvm::Twine Suffix = ".split"); /// An interface to create LLVM-IR for OpenMP directives. /// /// Each OpenMP directive has a corresponding public generator method. class OpenMPIRBuilder { public: /// Create a new OpenMPIRBuilder operating on the given module \p M. This will /// not have an effect on \p M (see initialize). OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {} ~OpenMPIRBuilder(); /// Initialize the internal state, this will put structures types and /// potentially other helpers into the underlying module. Must be called /// before any other method and only once! void initialize(); /// Finalize the underlying module, e.g., by outlining regions. /// \param Fn The function to be finalized. 
If not used, /// all functions are finalized. void finalize(Function *Fn = nullptr); /// Add attributes known for \p FnID to \p Fn. void addAttributes(omp::RuntimeFunction FnID, Function &Fn); /// Type used throughout for insertion points. using InsertPointTy = IRBuilder<>::InsertPoint; /// Callback type for variable finalization (think destructors). /// /// \param CodeGenIP is the insertion point at which the finalization code /// should be placed. /// /// A finalize callback knows about all objects that need finalization, e.g. /// destruction, when the scope of the currently generated construct is left /// at the time, and location, the callback is invoked. using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>; struct FinalizationInfo { /// The finalization callback provided by the last in-flight invocation of /// createXXXX for the directive of kind DK. FinalizeCallbackTy FiniCB; /// The directive kind of the innermost directive that has an associated /// region which might require finalization when it is left. omp::Directive DK; /// Flag to indicate if the directive is cancellable. bool IsCancellable; }; /// Push a finalization callback on the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void pushFinalizationCB(const FinalizationInfo &FI) { FinalizationStack.push_back(FI); } /// Pop the last finalization callback from the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void popFinalizationCB() { FinalizationStack.pop_back(); } /// Callback type for body (=inner region) code generation /// /// The callback takes code locations as arguments, each describing a /// location where additional instructions can be inserted. /// /// The CodeGenIP may be in the middle of a basic block or point to the end of /// it. The basic block may have a terminator or be degenerate. 
The callback /// function may just insert instructions at that position, but also split the /// block (without the Before argument of BasicBlock::splitBasicBlock such /// that the identify of the split predecessor block is preserved) and insert /// additional control flow, including branches that do not lead back to what /// follows the CodeGenIP. Note that since the callback is allowed to split /// the block, callers must assume that InsertPoints to positions in the /// BasicBlock after CodeGenIP including CodeGenIP itself are invalidated. If /// such InsertPoints need to be preserved, it can split the block itself /// before calling the callback. /// /// AllocaIP and CodeGenIP must not point to the same position. /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. The BasicBlock it is pointing to must /// not be split. /// \param CodeGenIP is the insertion point at which the body code should be /// placed. using BodyGenCallbackTy = function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>; // This is created primarily for sections construct as llvm::function_ref // (BodyGenCallbackTy) is not storable (as described in the comments of // function_ref class - function_ref contains non-ownable reference // to the callable. using StorableBodyGenCallbackTy = std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>; /// Callback type for loop body code generation. /// /// \param CodeGenIP is the insertion point where the loop's body code must be /// placed. This will be a dedicated BasicBlock with a /// conditional branch from the loop condition check and /// terminated with an unconditional branch to the loop /// latch. /// \param IndVar is the induction variable usable at the insertion point. using LoopBodyGenCallbackTy = function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>; /// Callback type for variable privatization (think copy & default /// constructor). 
/// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the privatization code /// should be placed. /// \param Original The value being copied/created, should not be used in the /// generated IR. /// \param Inner The equivalent of \p Original that should be used in the /// generated IR; this is equal to \p Original if the value is /// a pointer and can thus be passed directly, otherwise it is /// an equivalent but different value. /// \param ReplVal The replacement value, thus a copy or new created version /// of \p Inner. /// /// \returns The new insertion point where code generation continues and /// \p ReplVal the replacement value. using PrivatizeCallbackTy = function_ref<InsertPointTy( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)>; /// Description of a LLVM-IR insertion point (IP) and a debug/source location /// (filename, line, column, ...). struct LocationDescription { LocationDescription(const IRBuilderBase &IRB) : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {} LocationDescription(const InsertPointTy &IP) : IP(IP) {} LocationDescription(const InsertPointTy &IP, const DebugLoc &DL) : IP(IP), DL(DL) {} InsertPointTy IP; DebugLoc DL; }; /// Emitter methods for OpenMP directives. /// ///{ /// Generator for '#omp barrier' /// /// \param Loc The location where the barrier directive was encountered. /// \param DK The kind of directive that caused the barrier. /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. 
  InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
                              bool ForceSimpleCall = false,
                              bool CheckCancelFlag = true);

  /// Generator for '#omp cancel'
  ///
  /// \param Loc The location where the directive was encountered.
  /// \param IfCondition The evaluated 'if' clause expression, if any.
  /// \param CanceledDirective The kind of directive that is canceled.
  ///
  /// \returns The insertion point after the cancellation code.
  InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
                             omp::Directive CanceledDirective);

  /// Generator for '#omp parallel'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion points to be used for alloca instructions.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param PrivCB Callback to copy a given variable (think copy constructor).
  /// \param FiniCB Callback to finalize variable copies.
  /// \param IfCondition The evaluated 'if' clause expression, if any.
  /// \param NumThreads The evaluated 'num_threads' clause expression, if any.
  /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
  /// \param IsCancellable Flag to indicate a cancellable parallel region.
  ///
  /// \returns The insertion position *after* the parallel.
  IRBuilder<>::InsertPoint
  createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
                 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
                 FinalizeCallbackTy FiniCB, Value *IfCondition,
                 Value *NumThreads, omp::ProcBindKind ProcBind,
                 bool IsCancellable);

  /// Generator for the control flow structure of an OpenMP canonical loop.
  ///
  /// This generator operates on the logical iteration space of the loop, i.e.
  /// the caller only has to provide a loop trip count of the loop as defined
  /// by base language semantics. The trip count is interpreted as an unsigned
  /// integer. The induction variable passed to \p BodyGenCB will be of the
  /// same type and run from 0 to \p TripCount - 1.
It is up to the callback to /// convert the logical iteration variable to the loop counter variable in the /// loop body. /// /// \param Loc The insert and source location description. The insert /// location can be between two instructions or the end of a /// degenerate block (e.g. a BB under construction). /// \param BodyGenCB Callback that will generate the loop body code. /// \param TripCount Number of iterations the loop body is executed. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); /// Generator for the control flow structure of an OpenMP canonical loop. /// /// Instead of a logical iteration space, this allows specifying user-defined /// loop counter values using increment, upper- and lower bounds. To /// disambiguate the terminology when counting downwards, instead of lower /// bounds we use \p Start for the loop counter value in the first body /// iteration. /// /// Consider the following limitations: /// /// * A loop counter space over all integer values of its bit-width cannot be /// represented. E.g using uint8_t, its loop trip count of 256 cannot be /// stored into an 8 bit integer): /// /// DO I = 0, 255, 1 /// /// * Unsigned wrapping is only supported when wrapping only "once"; E.g. /// effectively counting downwards: /// /// for (uint8_t i = 100u; i > 0; i += 127u) /// /// /// TODO: May need to add additional parameters to represent: /// /// * Allow representing downcounting with unsigned integers. /// /// * Sign of the step and the comparison operator might disagree: /// /// for (int i = 0; i < 42; i -= 1u) /// // /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the loop body code. 
/// \param Start Value of the loop counter for the first iterations. /// \param Stop Loop counter values past this will stop the loop. /// \param Step Loop counter increment after each iteration; negative /// means counting down. /// \param IsSigned Whether Start, Stop and Step are signed integers. /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop /// counter. /// \param ComputeIP Insertion point for instructions computing the trip /// count. Can be used to ensure the trip count is available /// at the outermost loop of a loop nest. If not set, /// defaults to the preheader of the generated loop. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP = {}, const Twine &Name = "loop"); /// Collapse a loop nest into a single loop. /// /// Merges loops of a loop nest into a single CanonicalLoopNest representation /// that has the same number of innermost loop iterations as the origin loop /// nest. The induction variables of the input loops are derived from the /// collapsed loop's induction variable. This is intended to be used to /// implement OpenMP's collapse clause. Before applying a directive, /// collapseLoops normalizes a loop nest to contain only a single loop and the /// directive's implementation does not need to handle multiple loops itself. /// This does not remove the need to handle all loop nest handling by /// directives, such as the ordered(<n>) clause or the simd schedule-clause /// modifier of the worksharing-loop directive. 
/// /// Example: /// \code /// for (int i = 0; i < 7; ++i) // Canonical loop "i" /// for (int j = 0; j < 9; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After collapsing with Loops={i,j}, the loop is changed to /// \code /// for (int ij = 0; ij < 63; ++ij) { /// int i = ij / 9; /// int j = ij % 9; /// body(i, j); /// } /// \endcode /// /// In the current implementation, the following limitations apply: /// /// * All input loops have an induction variable of the same type. /// /// * The collapsed loop will have the same trip count integer type as the /// input loops. Therefore it is possible that the collapsed loop cannot /// represent all iterations of the input loops. For instance, assuming a /// 32 bit integer type, and two input loops both iterating 2^16 times, the /// theoretical trip count of the collapsed loop would be 2^32 iteration, /// which cannot be represented in an 32-bit integer. Behavior is undefined /// in this case. /// /// * The trip counts of every input loop must be available at \p ComputeIP. /// Non-rectangular loops are not yet supported. /// /// * At each nest level, code between a surrounding loop and its nested loop /// is hoisted into the loop body, and such code will be executed more /// often than before collapsing (or not at all if any inner loop iteration /// has a trip count of 0). This is permitted by the OpenMP specification. /// /// \param DL Debug location for instructions added for collapsing, /// such as instructions to compute/derive the input loop's /// induction variables. /// \param Loops Loops in the loop nest to collapse. Loops are specified /// from outermost-to-innermost and every control flow of a /// loop's body must pass through its directly nested loop. /// \param ComputeIP Where additional instruction that compute the collapsed /// trip count. If not set, defaults to before the generated /// loop. /// /// \returns The CanonicalLoopInfo object representing the collapsed loop. 
  CanonicalLoopInfo *collapseLoops(DebugLoc DL,
                                   ArrayRef<CanonicalLoopInfo *> Loops,
                                   InsertPointTy ComputeIP);

private:
  /// Modifies the canonical loop to be a statically-scheduled workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain the loop bounds to be used
  /// in the current thread, updates the relevant instructions in the
  /// canonical loop and calls to an OpenMP runtime finalization function
  /// after the loop.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                         InsertPointTy AllocaIP,
                                         bool NeedsBarrier);

  /// Modifies the canonical loop to be a statically-scheduled workshare loop
  /// with a user-specified chunk size.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in
  ///                 the preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param ChunkSize The user-specified chunk size.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyStaticChunkedWorkshareLoop(DebugLoc DL,
                                                CanonicalLoopInfo *CLI,
                                                InsertPointTy AllocaIP,
                                                bool NeedsBarrier,
                                                Value *ChunkSize);

  /// Modifies the canonical loop to be a dynamically-scheduled workshare
  /// loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain, and then in each iteration
  /// to update the loop counter.
  ///
  /// \param DL        Debug location for instructions added for the
  ///                  workshare-loop construct itself.
  /// \param CLI       A descriptor of the canonical loop to workshare.
  /// \param AllocaIP  An insertion point for Alloca instructions usable in
  ///                  the preheader of the loop.
  /// \param SchedType Type of scheduling to be passed to the init function.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param Chunk     The size of loop chunk considered as a unit when
  ///                  scheduling. If \p nullptr, defaults to 1.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          omp::OMPScheduleType SchedType,
                                          bool NeedsBarrier,
                                          Value *Chunk = nullptr);

public:
  /// Modifies the canonical loop to be a workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain the loop bounds to be used
  /// in the current thread, updates the relevant instructions in the
  /// canonical loop and calls to an OpenMP runtime finalization function
  /// after the loop.
  ///
  /// The concrete transformation is done by applyStaticWorkshareLoop,
  /// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
  /// on the value of \p SchedKind and \p ChunkSize.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param SchedKind Scheduling algorithm to use.
  /// \param ChunkSize The chunk size for the inner loop.
  /// \param HasSimdModifier Whether the simd modifier is present in the
  ///                        schedule clause.
  /// \param HasMonotonicModifier Whether the monotonic modifier is present in
  ///                             the schedule clause.
  /// \param HasNonmonotonicModifier Whether the nonmonotonic modifier is
  ///                                present in the schedule clause.
  /// \param HasOrderedClause Whether the (parameterless) ordered clause is
  ///                         present.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyWorkshareLoop(
      DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
      bool NeedsBarrier,
      llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
      Value *ChunkSize = nullptr, bool HasSimdModifier = false,
      bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
      bool HasOrderedClause = false);

  /// Tile a loop nest.
  ///
  /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
  /// \p Loops must be perfectly nested, from outermost to innermost loop
  /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
  /// of every loop and every tile sizes must be usable in the outermost
  /// loop's preheader. This implies that the loop nest is rectangular.
  ///
  /// Example:
  /// \code
  ///   for (int i = 0; i < 15; ++i) // Canonical loop "i"
  ///     for (int j = 0; j < 14; ++j) // Canonical loop "j"
  ///       body(i, j);
  /// \endcode
  ///
  /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed
  /// to
  /// \code
  ///   for (int i1 = 0; i1 < 3; ++i1)
  ///     for (int j1 = 0; j1 < 2; ++j1)
  ///       for (int i2 = 0; i2 < 5; ++i2)
  ///         for (int j2 = 0; j2 < 7; ++j2)
  ///           body(i1*5+i2, j1*7+j2);
  /// \endcode
  ///
  /// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
  /// referred to as the floor, and the loops i2 and j2 are the tiles. Tiling
  /// also handles non-constant trip counts, non-constant tile sizes and trip
  /// counts that are not multiples of the tile size. In the latter case the
  /// tile loop of the last floor-loop iteration will have fewer iterations
  /// than specified as its tile size.
  ///
  ///
  /// @param DL        Debug location for instructions added by tiling, for
  ///                  instance the floor- and tile trip count computation.
  /// @param Loops     Loops to tile. The CanonicalLoopInfo objects are
  ///                  invalidated by this method, i.e. should not be used
  ///                  after tiling.
  /// @param TileSizes For each loop in \p Loops, the tile size for that
  ///                  dimension.
  ///
  /// \returns A list of generated loops. Contains twice as many loops as the
  ///          input loop nest; the first half are the floor loops and the
  ///          second half are the tile loops.
  std::vector<CanonicalLoopInfo *>
  tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
            ArrayRef<Value *> TileSizes);

  /// Fully unroll a loop.
  ///
  /// Instead of unrolling the loop immediately (and duplicating its body
  /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
  /// metadata.
  ///
  /// \param DL   Debug location for instructions added by unrolling.
  /// \param Loop The loop to unroll. The loop will be invalidated.
  void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);

  /// Fully or partially unroll a loop. How the loop is unrolled is determined
  /// using LLVM's LoopUnrollPass.
  ///
  /// \param DL   Debug location for instructions added by unrolling.
  /// \param Loop The loop to unroll. The loop will be invalidated.
  void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);

  /// Partially unroll a loop.
  ///
  /// The CanonicalLoopInfo of the unrolled loop for use with chained
  /// loop-associated directive can be requested using \p UnrolledCLI. Not
  /// needing the CanonicalLoopInfo allows more efficient code generation by
  /// deferring the actual unrolling to the LoopUnrollPass using loop
  /// metadata. A loop-associated directive applied to the unrolled loop needs
  /// to know the new trip count which means that if using a heuristically
  /// determined unroll factor (\p Factor == 0), that factor must be computed
  /// immediately. We are using the same logic as the LoopUnrollPass to derive
  /// the unroll factor, but which assumes that some canonicalization has
  /// taken place (e.g. Mem2Reg, LICM, GVN, Inlining, etc.). That is, the
  /// heuristic will perform better when the unrolled loop's CanonicalLoopInfo
  /// is not needed.
  ///
  /// \param DL          Debug location for instructions added by unrolling.
  /// \param Loop        The loop to unroll. The loop will be invalidated.
  /// \param Factor      The factor to unroll the loop by. A factor of 0
  ///                    indicates that a heuristic should be used to
  ///                    determine the unroll-factor.
  /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
  ///                    partially unrolled loop. Otherwise, uses loop
  ///                    metadata to defer unrolling to the LoopUnrollPass.
  void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
                         CanonicalLoopInfo **UnrolledCLI);

  /// Add metadata to simd-ize a loop.
  ///
  /// \param DL   Debug location for instructions added for the
  ///             simd-ization.
  /// \param Loop The loop to simd-ize.
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop); /// Generator for '#omp flush' /// /// \param Loc The location where the flush directive was encountered void createFlush(const LocationDescription &Loc); /// Generator for '#omp taskwait' /// /// \param Loc The location where the taskwait directive was encountered. void createTaskwait(const LocationDescription &Loc); /// Generator for '#omp taskyield' /// /// \param Loc The location where the taskyield directive was encountered. void createTaskyield(const LocationDescription &Loc); /// Functions used to generate reductions. Such functions take two Values /// representing LHS and RHS of the reduction, respectively, and a reference /// to the value that is updated to refer to the reduction result. using ReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>; /// Functions used to generate atomic reductions. Such functions take two /// Values representing pointers to LHS and RHS of the reduction, as well as /// the element type of these pointers. They are expected to atomically /// update the LHS to the reduced value. using AtomicReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>; /// Information about an OpenMP reduction. struct ReductionInfo { ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, ReductionGenTy ReductionGen, AtomicReductionGenTy AtomicReductionGen) : ElementType(ElementType), Variable(Variable), PrivateVariable(PrivateVariable), ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) { assert(cast<PointerType>(Variable->getType()) ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type"); } /// Reduction element type, must match pointee type of variable. Type *ElementType; /// Reduction variable of pointer type. Value *Variable; /// Thread-private partial reduction variable. Value *PrivateVariable; /// Callback for generating the reduction body. 
The IR produced by this will /// be used to combine two values in a thread-safe context, e.g., under /// lock or within the same thread, and therefore need not be atomic. ReductionGenTy ReductionGen; /// Callback for generating the atomic reduction body, may be null. The IR /// produced by this will be used to atomically combine two values during /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. AtomicReductionGenTy AtomicReductionGen; }; // TODO: provide atomic and non-atomic reduction generators for reduction // operators defined by the OpenMP specification. /// Generator for '#omp reduction'. /// /// Emits the IR instructing the runtime to perform the specific kind of /// reductions. Expects reduction variables to have been privatized and /// initialized to reduction-neutral values separately. Emits the calls to /// runtime functions as well as the reduction function and the basic blocks /// performing the reduction atomically and non-atomically. /// /// The code emitted for the following: /// /// \code /// type var_1; /// type var_2; /// #pragma omp <directive> reduction(reduction-op:var_1,var_2) /// /* body */; /// \endcode /// /// corresponds to the following sketch. /// /// \code /// void _outlined_par() { /// // N is the number of different reductions. /// void *red_array[] = {privatized_var_1, privatized_var_2, ...}; /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array, /// _omp_reduction_func, /// _gomp_critical_user.reduction.var)) { /// case 1: { /// var_1 = var_1 <reduction-op> privatized_var_1; /// var_2 = var_2 <reduction-op> privatized_var_2; /// // ... /// __kmpc_end_reduce(...); /// break; /// } /// case 2: { /// _Atomic<ReductionOp>(var_1, privatized_var_1); /// _Atomic<ReductionOp>(var_2, privatized_var_2); /// // ... 
/// break; /// } /// default: break; /// } /// } /// /// void _omp_reduction_func(void **lhs, void **rhs) { /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0]; /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1]; /// // ... /// } /// \endcode /// /// \param Loc The location where the reduction was /// encountered. Must be within the associate /// directive and after the last local access to the /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. /// \param ReductionInfos A list of info on each reduction variable. /// \param IsNoWait A flag set if the reduction is marked as nowait. InsertPointTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait = false); ///} /// Return the insertion point used by the underlying IRBuilder. InsertPointTy getInsertionPoint() { return Builder.saveIP(); } /// Update the internal location to \p Loc. bool updateToLocation(const LocationDescription &Loc) { Builder.restoreIP(Loc.IP); Builder.SetCurrentDebugLocation(Loc.DL); return Loc.IP.getBlock() != nullptr; } /// Return the function declaration for the runtime function with \p FnID. FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID); Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID); /// Return the (LLVM-IR) string describing the source location \p LocStr. Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the default source location. Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the source location identified by /// the arguments. Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName, unsigned Line, unsigned Column, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the DebugLoc \p DL. 
  /// Use \p F as fallback if \p DL does not specify the function name.
  Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
                                 Function *F = nullptr);

  /// Return the (LLVM-IR) string describing the source location \p Loc.
  Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
                                 uint32_t &SrcLocStrSize);

  /// Return an ident_t* encoding the source location \p SrcLocStr and \p
  /// Flags.
  /// TODO: Create an enum class for the Reserve2Flags
  Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
                             omp::IdentFlag Flags = omp::IdentFlag(0),
                             unsigned Reserve2Flags = 0);

  /// Create a hidden global flag \p Name in the module with initial value \p
  /// Value.
  GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);

  /// Create an offloading section struct used to register this global at
  /// runtime.
  ///
  /// Type struct __tgt_offload_entry{
  ///   void    *addr;      // Pointer to the offload entry info.
  ///                       // (function or global)
  ///   char    *name;      // Name of the function or global.
  ///   size_t  size;       // Size of the entry info (0 if it is a function).
  ///   int32_t flags;
  ///   int32_t reserved;
  /// };
  ///
  /// \param Addr The pointer to the global being registered.
  /// \param Name The symbol name associated with the global.
  /// \param Size The size in bytes of the global (0 for functions).
  /// \param Flags Flags associated with the entry.
  /// \param SectionName The section this entry will be placed at.
  void emitOffloadingEntry(Constant *Addr, StringRef Name, uint64_t Size,
                           int32_t Flags,
                           StringRef SectionName = "omp_offloading_entries");

  /// Generate control flow and cleanup for cancellation.
  ///
  /// \param CancelFlag Flag indicating if the cancellation is performed.
  /// \param CanceledDirective The kind of directive that is canceled.
  /// \param ExitCB Extra code to be generated in the exit block.
  void emitCancelationCheckImpl(Value *CancelFlag,
                                omp::Directive CanceledDirective,
                                FinalizeCallbackTy ExitCB = {});

  /// Generate a barrier runtime call.
/// /// \param Loc The location at which the request originated and is fulfilled. /// \param DK The directive which caused the barrier /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy emitBarrierImpl(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall, bool CheckCancelFlag); /// Generate a flush runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitFlush(const LocationDescription &Loc); /// The finalization stack made up of finalize callbacks currently in-flight, /// wrapped into FinalizationInfo objects that reference also the finalization /// target block and the kind of cancellable directive. SmallVector<FinalizationInfo, 8> FinalizationStack; /// Return true if the last entry in the finalization stack is of kind \p DK /// and cancellable. bool isLastFinalizationInfoCancellable(omp::Directive DK) { return !FinalizationStack.empty() && FinalizationStack.back().IsCancellable && FinalizationStack.back().DK == DK; } /// Generate a taskwait runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskwaitImpl(const LocationDescription &Loc); /// Generate a taskyield runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskyieldImpl(const LocationDescription &Loc); /// Return the current thread ID. /// /// \param Ident The ident (ident_t*) describing the query origin. Value *getOrCreateThreadID(Value *Ident); /// The underlying LLVM-IR module Module &M; /// The LLVM-IR Builder used to create IR. IRBuilder<> Builder; /// Map to remember source location strings StringMap<Constant *> SrcLocStrMap; /// Map to remember existing ident_t*. 
DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap; /// Helper that contains information about regions we need to outline /// during finalization. struct OutlineInfo { using PostOutlineCBTy = std::function<void(Function &)>; PostOutlineCBTy PostOutlineCB; BasicBlock *EntryBB, *ExitBB, *OuterAllocaBB; SmallVector<Value *, 2> ExcludeArgsFromAggregate; /// Collect all blocks in between EntryBB and ExitBB in both the given /// vector and set. void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet, SmallVectorImpl<BasicBlock *> &BlockVector); /// Return the function that contains the region to be outlined. Function *getFunction() const { return EntryBB->getParent(); } }; /// Collection of regions that need to be outlined during finalization. SmallVector<OutlineInfo, 16> OutlineInfos; /// Collection of owned canonical loop objects that eventually need to be /// free'd. std::forward_list<CanonicalLoopInfo> LoopInfos; /// Add a new region that will be outlined later. void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); } /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars; /// Create the global variable holding the offload mappings information. GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, std::string VarName); /// Create the global variable holding the offload names information. GlobalVariable * createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, std::string VarName); struct MapperAllocas { AllocaInst *ArgsBase = nullptr; AllocaInst *Args = nullptr; AllocaInst *ArgSizes = nullptr; }; /// Create the allocas instruction used in call to mapper functions. 
void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas); /// Create the call for the target mapper function. /// \param Loc The source location description. /// \param MapperFunc Function to be called. /// \param SrcLocInfo Source location information global. /// \param MaptypesArg The argument types. /// \param MapnamesArg The argument names. /// \param MapperAllocas The AllocaInst used for the call. /// \param DeviceID Device ID for the call. /// \param NumOperands Number of operands in the call. void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands); public: /// Generator for __kmpc_copyprivate /// /// \param Loc The source location description. /// \param BufSize Number of elements in the buffer. /// \param CpyBuf List of pointers to data to be copied. /// \param CpyFn function to call for copying data. /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise. /// /// \return The insertion position *after* the CopyPrivate call. InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt); /// Generator for '#omp single' /// /// \param Loc The source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param IsNowait If false, a barrier is emitted. /// \param DidIt Local variable used as a flag to indicate 'single' thread /// /// \returns The insertion position *after* the single call. InsertPointTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsNowait, llvm::Value *DidIt); /// Generator for '#omp master' /// /// \param Loc The insert and source location description. 
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  ///
  /// \returns The insertion position *after* the master.
  InsertPointTy createMaster(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB);

  /// Generator for '#omp masked'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  ///
  /// \returns The insertion position *after* the masked.
  InsertPointTy createMasked(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB, Value *Filter);

  /// Generator for '#omp critical'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region body code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param CriticalName name of the lock used by the critical directive
  /// \param HintInst Hint Instruction for hint clause associated with critical
  ///
  /// \returns The insertion position *after* the critical.
  InsertPointTy createCritical(const LocationDescription &Loc,
                               BodyGenCallbackTy BodyGenCB,
                               FinalizeCallbackTy FiniCB,
                               StringRef CriticalName, Value *HintInst);

  /// Generator for '#omp ordered depend (source | sink)'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  /// \param NumLoops The number of loops in depend clause.
  /// \param StoreValues The value will be stored in vector address.
  /// \param Name The name of alloca instruction.
  /// \param IsDependSource If true, depend source; otherwise, depend sink.
  ///
  /// \return The insertion position *after* the ordered.
  InsertPointTy createOrderedDepend(const LocationDescription &Loc,
                                    InsertPointTy AllocaIP, unsigned NumLoops,
                                    ArrayRef<llvm::Value *> StoreValues,
                                    const Twine &Name, bool IsDependSource);

  /// Generator for '#omp ordered [threads | simd]'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param IsThreads If true, with threads clause or without clause;
  ///                  otherwise, with simd clause.
  ///
  /// \returns The insertion position *after* the ordered.
  InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
                                         BodyGenCallbackTy BodyGenCB,
                                         FinalizeCallbackTy FiniCB,
                                         bool IsThreads);

  /// Generator for '#omp sections'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion points to be used for alloca instructions.
  /// \param SectionCBs Callbacks that will generate body of each section.
  /// \param PrivCB Callback to copy a given variable (think copy constructor).
  /// \param FiniCB Callback to finalize variable copies.
  /// \param IsCancellable Flag to indicate a cancellable parallel region.
  /// \param IsNowait If true, the barrier ensuring all sections are executed
  ///                 before moving forward will not be generated.
  /// \returns The insertion position *after* the sections.
  InsertPointTy createSections(const LocationDescription &Loc,
                               InsertPointTy AllocaIP,
                               ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
                               PrivatizeCallbackTy PrivCB,
                               FinalizeCallbackTy FiniCB, bool IsCancellable,
                               bool IsNowait);

  /// Generator for '#omp section'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region body code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \returns The insertion position *after* the section.
  InsertPointTy createSection(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB);

  /// Generate conditional branch and relevant BasicBlocks through which private
  /// threads copy the 'copyin' variables from Master copy to threadprivate
  /// copies.
  ///
  /// \param IP insertion block for copyin conditional
  /// \param MasterAddr a pointer to the master variable
  /// \param PrivateAddr a pointer to the threadprivate variable
  /// \param IntPtrTy Pointer size type
  /// \param BranchtoEnd Create a branch between the copyin.not.master blocks
  ///                    and copy.in.end block
  ///
  /// \returns The insertion point where copying operation to be emitted.
  InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
                                         Value *PrivateAddr,
                                         llvm::IntegerType *IntPtrTy,
                                         bool BranchtoEnd = true);

  /// Create a runtime call for kmpc_Alloc
  ///
  /// \param Loc The insert and source location description.
  /// \param Size Size of allocated memory space
  /// \param Allocator Allocator information instruction
  /// \param Name Name of call Instruction for OMP_alloc
  ///
  /// \returns CallInst to the OMP_Alloc call
  CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
                           Value *Allocator, std::string Name = "");

  /// Create a runtime call for kmpc_free
  ///
  /// \param Loc The insert and source location description.
  /// \param Addr Address of memory space to be freed
  /// \param Allocator Allocator information instruction
  /// \param Name Name of call Instruction for OMP_Free
  ///
  /// \returns CallInst to the OMP_Free call
  CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
                          Value *Allocator, std::string Name = "");

  /// Create a runtime call for kmpc_threadprivate_cached
  ///
  /// \param Loc The insert and source location description.
  /// \param Pointer pointer to data to be cached
  /// \param Size size of data to be cached
  /// \param Name Name of call Instruction for callinst
  ///
  /// \returns CallInst to the thread private cache call.
  CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
                                      llvm::Value *Pointer,
                                      llvm::ConstantInt *Size,
                                      const llvm::Twine &Name = Twine(""));

  /// Create a runtime call for __tgt_interop_init
  ///
  /// \param Loc The insert and source location description.
  /// \param InteropVar variable to be allocated
  /// \param InteropType type of interop operation
  /// \param Device device to which offloading will occur
  /// \param NumDependences number of dependence variables
  /// \param DependenceAddress pointer to dependence variables
  /// \param HaveNowaitClause does nowait clause exist
  ///
  /// \returns CallInst to the __tgt_interop_init call
  CallInst *createOMPInteropInit(const LocationDescription &Loc,
                                 Value *InteropVar,
                                 omp::OMPInteropType InteropType, Value *Device,
                                 Value *NumDependences,
                                 Value *DependenceAddress,
                                 bool HaveNowaitClause);

  /// Create a runtime call for __tgt_interop_destroy
  ///
  /// \param Loc The insert and source location description.
  /// \param InteropVar variable to be allocated
  /// \param Device device to which offloading will occur
  /// \param NumDependences number of dependence variables
  /// \param DependenceAddress pointer to dependence variables
  /// \param HaveNowaitClause does nowait clause exist
  ///
  /// \returns CallInst to the __tgt_interop_destroy call
  CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
                                    Value *InteropVar, Value *Device,
                                    Value *NumDependences,
                                    Value *DependenceAddress,
                                    bool HaveNowaitClause);

  /// Create a runtime call for __tgt_interop_use
  ///
  /// \param Loc The insert and source location description.
  /// \param InteropVar variable to be allocated
  /// \param Device device to which offloading will occur
  /// \param NumDependences number of dependence variables
  /// \param DependenceAddress pointer to dependence variables
  /// \param HaveNowaitClause does nowait clause exist
  ///
  /// \returns CallInst to the __tgt_interop_use call
  CallInst *createOMPInteropUse(const LocationDescription &Loc,
                                Value *InteropVar, Value *Device,
                                Value *NumDependences,
                                Value *DependenceAddress,
                                bool HaveNowaitClause);

  /// The `omp target` interface
  ///
  /// For more information about the usage of this interface,
  /// \see openmp/libomptarget/deviceRTLs/common/include/target.h
  ///
  ///{

  /// Create a runtime call for kmpc_target_init
  ///
  /// \param Loc The insert and source location description.
  /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
  /// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
  InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
                                 bool RequiresFullRuntime);

  /// Create a runtime call for kmpc_target_deinit
  ///
  /// \param Loc The insert and source location description.
  /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
  /// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
  void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
                          bool RequiresFullRuntime);

  ///}

  /// Declarations for LLVM-IR types (simple, array, function and structure) are
  /// generated below. Their names are defined and used in OpenMPKinds.def. Here
  /// we provide the declarations, the initializeTypes function will provide the
  /// values.
  ///
  ///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)                             \
  ArrayType *VarName##Ty = nullptr;                                            \
  PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)                  \
  FunctionType *VarName = nullptr;                                             \
  PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...)                                 \
  StructType *VarName = nullptr;                                               \
  PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  ///}

private:
  /// Create all simple and struct types exposed by the runtime and remember
  /// the llvm::PointerTypes of them for easy access later.
  void initializeTypes(Module &M);

  /// Common interface for generating entry calls for OMP Directives.
  /// if the directive has a region/body, It will set the insertion
  /// point to the body
  ///
  /// \param OMPD Directive to generate entry blocks for
  /// \param EntryCall Call to the entry OMP Runtime Function
  /// \param ExitBB block where the region ends.
  /// \param Conditional indicate if the entry call result will be used
  ///        to evaluate a conditional of whether a thread will execute
  ///        body code or not.
  ///
  /// \return The insertion position in exit block
  InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
                                         BasicBlock *ExitBB,
                                         bool Conditional = false);

  /// Common interface to finalize the region
  ///
  /// \param OMPD Directive to generate exiting code for
  /// \param FinIP Insertion point for emitting Finalization code and exit call
  /// \param ExitCall Call to the ending OMP Runtime Function
  /// \param HasFinalize indicate if the directive will require finalization
  ///        and has a finalization callback in the stack that
  ///        should be called.
  ///
  /// \return The insertion position in exit block
  InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
                                        InsertPointTy FinIP,
                                        Instruction *ExitCall,
                                        bool HasFinalize = true);

  /// Common Interface to generate OMP inlined regions
  ///
  /// \param OMPD Directive to generate inlined region for
  /// \param EntryCall Call to the entry OMP Runtime Function
  /// \param ExitCall Call to the ending OMP Runtime Function
  /// \param BodyGenCB Body code generation callback.
  /// \param FiniCB Finalization Callback.
  /// Will be called when finalizing region
  /// \param Conditional indicate if the entry call result will be used
  ///        to evaluate a conditional of whether a thread will execute
  ///        body code or not.
  /// \param HasFinalize indicate if the directive will require finalization
  ///        and has a finalization callback in the stack that
  ///        should be called.
  /// \param IsCancellable if HasFinalize is set to true, indicate if the
  ///        directive should be cancellable.
  /// \return The insertion point after the region
  InsertPointTy EmitOMPInlinedRegion(omp::Directive OMPD,
                                     Instruction *EntryCall,
                                     Instruction *ExitCall,
                                     BodyGenCallbackTy BodyGenCB,
                                     FinalizeCallbackTy FiniCB,
                                     bool Conditional = false,
                                     bool HasFinalize = true,
                                     bool IsCancellable = false);

  /// Get the platform-specific name separator.
  /// \param Parts different parts of the final name that needs separation
  /// \param FirstSeparator First separator used between the initial two
  ///        parts of the name.
  /// \param Separator separator used between all of the rest consecutive
  ///        parts of the name
  static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
                                           StringRef FirstSeparator,
                                           StringRef Separator);

  /// Gets (if a variable with the given name already exists) or creates an
  /// internal global variable with the specified Name. The created variable
  /// has linkage CommonLinkage by default and is initialized by null value.
  /// \param Ty Type of the global variable. If it already exists, the type
  ///        must be the same.
  /// \param Name Name of the variable.
  Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
                                           unsigned AddressSpace = 0);

  /// Returns corresponding lock object for the specified critical region
  /// name. If the lock object does not exist it is created, otherwise the
  /// reference to the existing copy is returned.
  /// \param CriticalName Name of the critical region.
/// Value *getOMPCriticalRegionLock(StringRef CriticalName); /// Callback type for Atomic Expression update /// ex: /// \code{.cpp} /// unsigned x = 0; /// #pragma omp atomic update /// x = Expr(x_old); //Expr() is any legal operation /// \endcode /// /// \param XOld the value of the atomic memory address to use for update /// \param IRB reference to the IRBuilder to use /// /// \returns Value to update X to. using AtomicUpdateCallbackTy = const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>; private: enum AtomicKind { Read, Write, Update, Capture, Compare }; /// Determine whether to emit flush or not /// /// \param Loc The insert and source location description. /// \param AO The required atomic ordering /// \param AK The OpenMP atomic operation kind used. /// /// \returns wether a flush was emitted or not bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc, AtomicOrdering AO, AtomicKind AK); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param AllocaIP The insertion point to be used for alloca /// instructions. /// \param X The target atomic pointer to be updated /// \param XElemTy The element type of the atomic pointer. /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic /// instructions. /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, /// or belong to {FADD, FSUB, BAD_BINOP}. /// Then a `cmpExch` based atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param VolatileX true if \a X volatile? /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. 
  /// true for X = X BinOp Expr)
  ///
  /// \returns A pair of the old value of X before the update, and the value
  ///          used for the update.
  std::pair<Value *, Value *>
  emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
                   AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
                   AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
                   bool IsXBinopExpr);

  /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
  ///
  /// \Return The instruction
  Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
                                AtomicRMWInst::BinOp RMWOp);

public:
  /// a struct to pack relevant information while generating atomic Ops
  struct AtomicOpValue {
    Value *Var = nullptr;
    Type *ElemTy = nullptr;
    bool IsSigned = false;
    bool IsVolatile = false;
  };

  /// Emit atomic Read for : V = X --- Only Scalar data types.
  ///
  /// \param Loc The insert and source location description.
  /// \param X   The target pointer to be atomically read
  /// \param V   Memory address where to store atomically read
  ///            value
  /// \param AO  Atomic ordering of the generated atomic
  ///            instructions.
  ///
  /// \return Insertion point after generated atomic read IR.
  InsertPointTy createAtomicRead(const LocationDescription &Loc,
                                 AtomicOpValue &X, AtomicOpValue &V,
                                 AtomicOrdering AO);

  /// Emit atomic write for : X = Expr --- Only Scalar data types.
  ///
  /// \param Loc  The insert and source location description.
  /// \param X    The target pointer to be atomically written to
  /// \param Expr The value to store.
  /// \param AO   Atomic ordering of the generated atomic
  ///             instructions.
  ///
  /// \return Insertion point after generated atomic Write IR.
  InsertPointTy createAtomicWrite(const LocationDescription &Loc,
                                  AtomicOpValue &X, Value *Expr,
                                  AtomicOrdering AO);

  /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
  /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
  /// Only Scalar data types.
  ///
  /// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions. /// \param RMWOp The binary operation used for update. If operation /// is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \return Insertion point after generated atomic update IR. InsertPointTy createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr); /// Emit atomic update for constructs: --- Only Scalar data types /// V = X; X = X BinOp Expr , /// X = X BinOp Expr; V = X, /// V = X; X = Expr BinOp X, /// X = Expr BinOp X; V = X, /// V = X; X = UpdateOp(X), /// X = UpdateOp(X); V = X, /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion point to be used for alloca instructions. /// \param X The target atomic pointer to be updated /// \param V Memory address where to store captured value /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. 
/// \param UpdateExpr true if X is an in place update of the form /// X = X BinOp Expr or X = Expr BinOp X /// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. /// /// \return Insertion point after generated atomic capture IR. InsertPointTy createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr); /// Emit atomic compare for constructs: --- Only scalar data types /// cond-update-atomic: /// x = x ordop expr ? expr : x; /// x = expr ordop x ? expr : x; /// x = x == e ? d : x; /// x = e == x ? d : x; (this one is not in the spec) /// cond-update-stmt: /// if (x ordop expr) { x = expr; } /// if (expr ordop x) { x = expr; } /// if (x == e) { x = d; } /// if (e == x) { x = d; } (this one is not in the spec) /// /// \param Loc The insert and source location description. /// \param X The target atomic pointer to be updated. /// \param E The expected value ('e') for forms that use an /// equality comparison or an expression ('expr') for /// forms that use 'ordop' (logically an atomic maximum or /// minimum). /// \param D The desired value for forms that use an equality /// comparison. If forms that use 'ordop', it should be /// \p nullptr. /// \param AO Atomic ordering of the generated atomic instructions. /// \param Op Atomic compare operation. It can only be ==, <, or >. /// \param IsXBinopExpr True if the conditional statement is in the form where /// x is on LHS. It only matters for < or >. /// /// \return Insertion point after generated atomic capture IR. 
  InsertPointTy createAtomicCompare(const LocationDescription &Loc,
                                    AtomicOpValue &X, Value *E, Value *D,
                                    AtomicOrdering AO,
                                    omp::OMPAtomicCompareOp Op,
                                    bool IsXBinopExpr);

  /// Create the control flow structure of a canonical OpenMP loop.
  ///
  /// The emitted loop will be disconnected, i.e. no edge to the loop's
  /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
  /// IRBuilder location is not preserved.
  ///
  /// \param DL        DebugLoc used for the instructions in the skeleton.
  /// \param TripCount Value to be used for the trip count.
  /// \param F         Function in which to insert the BasicBlocks.
  /// \param PreInsertBefore  Where to insert BBs that execute before the body,
  ///                         typically the body itself.
  /// \param PostInsertBefore Where to insert BBs that execute after the body.
  /// \param Name      Base name used to derive BB
  ///                  and instruction names.
  ///
  /// \returns The CanonicalLoopInfo that represents the emitted loop.
  CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
                                        Function *F,
                                        BasicBlock *PreInsertBefore,
                                        BasicBlock *PostInsertBefore,
                                        const Twine &Name = {});
};

/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
///  * The number of loop iterations must have been computed before entering the
///    loop.
///
///  * Has an (unsigned) logical induction variable that starts at zero and
///    increments by one.
///
///  * The loop's CFG itself has no side-effects. The OpenMP specification
///    itself allows side-effects, but the order in which they happen, including
///    how often or whether at all, is unspecified. We expect that the frontend
///    will emit those side-effect instructions somewhere (e.g. before the loop)
///    such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///        |
///  /-> Header
///  |     |
///  |    Cond---\
///  |     |     |
///  |    Body   |
///  |    | |    |
///  |   <...>   |
///  |    | |    |
///   \--Latch   |
///              |
///             Exit
///              |
///            After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control.
/// Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  BasicBlock *Header = nullptr;
  BasicBlock *Cond = nullptr;
  BasicBlock *Latch = nullptr;
  BasicBlock *Exit = nullptr;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

  /// Sets the number of loop iterations to the given value. This value must be
  /// valid in the condition block (i.e., defined in the preheader) and is
  /// interpreted as an unsigned integer.
  void setTripCount(Value *TripCount);

  /// Replace all uses of the canonical induction variable in the loop body with
  /// a new one.
  ///
  /// The intended use case is to update the induction variable for an updated
  /// iteration space such that it can stay normalized in the 0...tripcount-1
  /// range.
  ///
  /// The \p Updater is called with the (presumably updated) current normalized
  /// induction variable and is expected to return the value that uses of the
  /// pre-updated induction values should use instead, typically dependent on
  /// the new induction variable. This is a lambda (instead of e.g. just passing
  /// the new value) to be able to distinguish the uses of the pre-updated
  /// induction variable and uses of the induction variable to compute the
  /// updated induction variable value.
  void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or not
  /// been initialized. Do not use it in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be execute before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const;

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers.
It is separate from the exit block to ensure, analogous to the /// preheader, it having just a single entry edge and being free from PHI /// nodes should there be multiple loop exits (such as from break /// statements/cancellations). BasicBlock *getAfter() const { assert(isValid() && "Requires a valid canonical loop"); return Exit->getSingleSuccessor(); } /// Returns the llvm::Value containing the number of loop iterations. It must /// be valid in the preheader and always interpreted as an unsigned integer of /// any bit-width. Value *getTripCount() const { assert(isValid() && "Requires a valid canonical loop"); Instruction *CmpI = &Cond->front(); assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount"); return CmpI->getOperand(1); } /// Returns the instruction representing the current logical induction /// variable. Always unsigned, always starting at 0 with an increment of one. Instruction *getIndVar() const { assert(isValid() && "Requires a valid canonical loop"); Instruction *IndVarPHI = &Header->front(); assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI"); return IndVarPHI; } /// Return the type of the induction variable (and the trip count). Type *getIndVarType() const { assert(isValid() && "Requires a valid canonical loop"); return getIndVar()->getType(); } /// Return the insertion point for user code before the loop. OpenMPIRBuilder::InsertPointTy getPreheaderIP() const { assert(isValid() && "Requires a valid canonical loop"); BasicBlock *Preheader = getPreheader(); return {Preheader, std::prev(Preheader->end())}; }; /// Return the insertion point for user code in the body. OpenMPIRBuilder::InsertPointTy getBodyIP() const { assert(isValid() && "Requires a valid canonical loop"); BasicBlock *Body = getBody(); return {Body, Body->begin()}; }; /// Return the insertion point for user code after the loop. 
OpenMPIRBuilder::InsertPointTy getAfterIP() const { assert(isValid() && "Requires a valid canonical loop"); BasicBlock *After = getAfter(); return {After, After->begin()}; }; Function *getFunction() const { assert(isValid() && "Requires a valid canonical loop"); return Header->getParent(); } /// Consistency self-check. void assertOK() const; /// Invalidate this loop. That is, the underlying IR does not fulfill the /// requirements of an OpenMP canonical loop anymore. void invalidate(); }; } // end namespace llvm #endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
microkernels.h
#ifndef MICROKERNELS_H_ #define MICROKERNELS_H_ static void kernel_o2_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "movq $7, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 24(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 48(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 24(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 
48(%%rdx)%{%%k1%}\n\t" "addq $24, %%rdx\n\t" "addq $24, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 54; #endif } static void kernel_o2_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "movq $7, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 24(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 48(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 24(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 48(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, 
%%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 24(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 48(%%rdx)%{%%k1%}\n\t" "addq $24, %%rdx\n\t" "addq $24, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 54; #endif } static void kernel_o3_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "movq $63, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $48, %%r15\n\t" "movq $144, %%rax\n\t" "movq $240, %%rbx\n\t" "movq $336, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 48(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 96(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 144(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 192(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 240(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 
32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 48(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 96(%%rdx)%{%%k1%}\n\t" "addq $48, %%rdx\n\t" "addq $48, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 216; #endif } static void kernel_o3_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "movq $63, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 48(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 96(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 48(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 96(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 
0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 48(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 96(%%rdx)%{%%k1%}\n\t" "addq $48, %%rdx\n\t" "addq $48, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 108; #endif } static void kernel_o4_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $80, %%r15\n\t" "movq $240, %%rax\n\t" "movq $400, %%rbx\n\t" "movq $560, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 80(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 160(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 
8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 240(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 320(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 400(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 480(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 560(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 640(%%rdi), %%zmm0\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 720(%%rdi), %%zmm1\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd 
%%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 80(%%rdx)\n\t" "vmovupd %%zmm31, 160(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $8, %%r12\n\t" "jl 33b\n\t" "movq $3, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $80, %%r15\n\t" "movq $240, %%rax\n\t" "movq $400, %%rbx\n\t" "movq $560, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 80(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 160(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 240(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 320(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 400(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 480(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" 
"vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 560(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 640(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 720(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 80(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 160(%%rdx)%{%%k1%}\n\t" "addq $16, %%rdx\n\t" "addq $16, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, 
lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 600; #endif } static void kernel_o4_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 80(%%rdx), %%zmm30\n\t" "vmovupd 160(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 80(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 160(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 80(%%rdx)\n\t" "vmovupd 
%%zmm31, 160(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $8, %%r12\n\t" "jl 33b\n\t" "movq $3, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 80(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 160(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 80(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 160(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 80(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 160(%%rdx)%{%%k1%}\n\t" "addq $16, %%rdx\n\t" "addq $16, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : 
"k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 180; #endif } static void kernel_o5_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $120, %%r15\n\t" "movq $360, %%rax\n\t" "movq $600, %%rbx\n\t" "movq $840, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 120(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 240(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 360(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, 
%%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 480(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 600(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 720(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 840(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 960(%%rdi), %%zmm0\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1080(%%rdi), %%zmm1\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1200(%%rdi), %%zmm0\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1320(%%rdi), %%zmm1\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1440(%%rdi), %%zmm0\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1560(%%rdi), %%zmm1\n\t" "vfmadd231pd 
96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1680(%%rdi), %%zmm0\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 120(%%rdx)\n\t" "vmovupd %%zmm31, 240(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $8, %%r12\n\t" "jl 33b\n\t" "movq $127, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $120, %%r15\n\t" "movq $360, %%rax\n\t" "movq $600, %%rbx\n\t" "movq $840, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 120(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 240(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" 
"vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 360(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 480(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 600(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 720(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 840(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 960(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1080(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1200(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1320(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" 
"vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1440(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1560(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1680(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 120(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 240(%%rdx)%{%%k1%}\n\t" "addq $56, %%rdx\n\t" "addq $56, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was 
compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 1350; #endif }
/*
 * kernel_o5_b1 -- machine-generated (LIBXSMM-style) AVX-512 double-precision
 * micro-kernel computing C += A * B.  The "_b1" suffix denotes beta = 1:
 * the C accumulators (zmm29-31) are LOADED from memory before the FMAs,
 * whereas the "_b0" variants zero them with vpxord.
 * From the hard-coded byte offsets: A and C use a leading dimension of
 * 120 B (15 doubles), B columns are r15 = 24 B (3 doubles) apart, and the
 * K loop broadcasts 3 scalars of B (offsets 0/8/16) -- so this appears to
 * be a 15x3 += 15x3 * 3x3 update.  NOTE(review): inferred from offsets and
 * the 2*15*3*3 = 270 flop counter below; confirm against the generator's
 * operand descriptor.  The main loop (local label 33) covers 8 rows per
 * iteration; the final 7 rows use opmask k1 = 0x7f for masked, zero-filling
 * loads and masked stores.  rax/rbx/r11 are initialized but appear unused
 * in this variant (generator artifact).
 */
static void kernel_o5_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 120(%%rdx), %%zmm30\n\t" "vmovupd 240(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 120(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 240(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 120(%%rdx)\n\t" 
"vmovupd %%zmm31, 240(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $8, %%r12\n\t" "jl 33b\n\t" "movq $127, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 120(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 240(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 120(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 240(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 120(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 240(%%rdx)%{%%k1%}\n\t" "addq $56, %%rdx\n\t" "addq $56, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : 
"k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 270; #endif }
/*
 * kernel_o6_b0 -- generated AVX-512 micro-kernel computing C = A * B
 * (beta = 0: accumulators start from vpxord-zeroed registers; C is only
 * written, never read).  Offsets indicate a leading dimension of 168 B
 * (21 doubles) for A and C, B column stride r15 = 168 B, and a K loop over
 * 21 broadcast scalars of B (offsets 0..160) -- presumably a
 * 21x3 = 21x21 * 21x3 product; the 2*21*3*21 = 2646 flop counter below is
 * consistent.  TODO(review): confirm dimensions against the generator.
 * Main loop handles 8 rows per trip and runs while r12 < 16 (16 rows);
 * the remaining 5 rows use opmask k1 = 0x1f.  Eight accumulator triples
 * (zmm20-31) are summed at the end to shorten the FMA dependency chains.
 */
static void kernel_o6_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $168, %%r15\n\t" "movq $504, %%rax\n\t" "movq $840, %%rbx\n\t" "movq $1176, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 168(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 336(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 504(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, 
%%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 672(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 840(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1008(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1176(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1344(%%rdi), %%zmm0\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1512(%%rdi), %%zmm1\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1680(%%rdi), %%zmm0\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1848(%%rdi), %%zmm1\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2016(%%rdi), %%zmm0\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2184(%%rdi), %%zmm1\n\t" "vfmadd231pd 
96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2352(%%rdi), %%zmm0\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2520(%%rdi), %%zmm1\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2688(%%rdi), %%zmm0\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2856(%%rdi), %%zmm1\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3024(%%rdi), %%zmm0\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3192(%%rdi), %%zmm1\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3360(%%rdi), %%zmm0\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, 
%%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 168(%%rdx)\n\t" "vmovupd %%zmm31, 336(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $16, %%r12\n\t" "jl 33b\n\t" "movq $31, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $168, %%r15\n\t" "movq $504, %%rax\n\t" "movq $840, %%rbx\n\t" "movq $1176, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 168(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 336(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 504(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 672(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 840(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 
32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1008(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1176(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1344(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1512(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1680(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1848(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2016(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2184(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2352(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 
104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2520(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2688(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2856(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3024(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3192(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3360(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd 
%%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 168(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 336(%%rdx)%{%%k1%}\n\t" "addq $40, %%rdx\n\t" "addq $40, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 2646; #endif }
/*
 * kernel_o6_b1 -- generated AVX-512 micro-kernel computing C += A * B
 * (beta = 1: C accumulators are loaded before the FMAs).  Same 21-row
 * layout as kernel_o6_b0 (A/C leading dimension 168 B = 21 doubles), but
 * the K loop covers only 3 broadcast scalars of B (r15 = 24 B stride,
 * offsets 0/8/16) -- apparently a 21x3 += 21x3 * 3x3 update; flop counter
 * 2*21*3*3 = 378 below agrees.  Main loop: 8 rows/trip while r12 < 16;
 * remainder of 5 rows via opmask k1 = 0x1f.
 */
static void kernel_o6_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 168(%%rdx), %%zmm30\n\t" "vmovupd 336(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 168(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 336(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 
8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 168(%%rdx)\n\t" "vmovupd %%zmm31, 336(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $16, %%r12\n\t" "jl 33b\n\t" "movq $31, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 168(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 336(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 168(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 336(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 
16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 168(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 336(%%rdx)%{%%k1%}\n\t" "addq $40, %%rdx\n\t" "addq $40, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 378; #endif }
/*
 * kernel_o7_b0 -- generated AVX-512 micro-kernel, C = A * B (beta = 0).
 * A/C leading dimension here is 224 B (28 doubles); the main loop handles
 * 8 rows per trip while r12 < 24, with the 4-row remainder masked by
 * k1 = 0x0f (24 + 4 = 28 rows).  Definition continues beyond this chunk.
 */
static void kernel_o7_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $224, %%r15\n\t" "movq $672, %%rax\n\t" "movq $1120, %%rbx\n\t" "movq $1568, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 224(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 448(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 672(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 896(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1120(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, 
%%zmm31\n\t" "vmovupd 1344(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1568(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1792(%%rdi), %%zmm0\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2016(%%rdi), %%zmm1\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2240(%%rdi), %%zmm0\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2464(%%rdi), %%zmm1\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2688(%%rdi), %%zmm0\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2912(%%rdi), %%zmm1\n\t" "vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3136(%%rdi), %%zmm0\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3360(%%rdi), %%zmm1\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 
112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3584(%%rdi), %%zmm0\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3808(%%rdi), %%zmm1\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4032(%%rdi), %%zmm0\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4256(%%rdi), %%zmm1\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4480(%%rdi), %%zmm0\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4704(%%rdi), %%zmm1\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4928(%%rdi), %%zmm0\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 168(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5152(%%rdi), %%zmm1\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5376(%%rdi), %%zmm0\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, 
%%zmm1, %%zmm22\n\t" "vmovupd 5600(%%rdi), %%zmm1\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5824(%%rdi), %%zmm0\n\t" "vfmadd231pd 200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6048(%%rdi), %%zmm1\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 224(%%rdx)\n\t" "vmovupd %%zmm31, 448(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $24, %%r12\n\t" "jl 33b\n\t" "movq $15, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $224, %%r15\n\t" "movq $672, %%rax\n\t" "movq $1120, %%rbx\n\t" "movq $1568, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), 
%%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 224(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 448(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 672(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 896(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1120(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1344(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1568(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1792(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2016(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, 
%%zmm31\n\t" "vmovupd 2240(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2464(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2688(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2912(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3136(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3360(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3584(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3808(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4032(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 
136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4256(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4480(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4704(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4928(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 168(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5152(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5376(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 5600(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5824(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6048(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, 
%%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 224(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 448(%%rdx)%{%%k1%}\n\t" "addq $32, %%rdx\n\t" "addq $32, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 4704; #endif } static void kernel_o7_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 224(%%rdx), %%zmm30\n\t" "vmovupd 448(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 224(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 448(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 224(%%rdx)\n\t" "vmovupd %%zmm31, 448(%%rdx)\n\t" "addq $64, 
%%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $24, %%r12\n\t" "jl 33b\n\t" "movq $15, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 224(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 448(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 224(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 448(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 224(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 448(%%rdx)%{%%k1%}\n\t" "addq $32, %%rdx\n\t" "addq $32, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : 
"k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 504; #endif } static void kernel_o8_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $288, %%r15\n\t" "movq $864, %%rax\n\t" "movq $1440, %%rbx\n\t" "movq $2016, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 288(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 576(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 864(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, 
%%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1152(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1440(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1728(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2016(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2304(%%rdi), %%zmm0\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2592(%%rdi), %%zmm1\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2880(%%rdi), %%zmm0\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3168(%%rdi), %%zmm1\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3456(%%rdi), %%zmm0\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3744(%%rdi), %%zmm1\n\t" 
"vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4032(%%rdi), %%zmm0\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4320(%%rdi), %%zmm1\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4608(%%rdi), %%zmm0\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4896(%%rdi), %%zmm1\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5184(%%rdi), %%zmm0\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5472(%%rdi), %%zmm1\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5760(%%rdi), %%zmm0\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6048(%%rdi), %%zmm1\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6336(%%rdi), %%zmm0\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 168(%%rsi,%%r15,1)%{1to8%}, 
%%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6624(%%rdi), %%zmm1\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 6912(%%rdi), %%zmm0\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7200(%%rdi), %%zmm1\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7488(%%rdi), %%zmm0\n\t" "vfmadd231pd 200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 7776(%%rdi), %%zmm1\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 8064(%%rdi), %%zmm0\n\t" "addq $8352, %%rdi\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm1\n\t" "vfmadd231pd 224(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 224(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 224(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 288(%%rdi), %%zmm0\n\t" "vfmadd231pd 232(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 232(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 232(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 576(%%rdi), %%zmm1\n\t" "vfmadd231pd 240(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 240(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 240(%%rsi,%%r15,2)%{1to8%}, %%zmm0, 
%%zmm25\n\t" "vmovupd 864(%%rdi), %%zmm0\n\t" "vfmadd231pd 248(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 248(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 248(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1152(%%rdi), %%zmm1\n\t" "vfmadd231pd 256(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 256(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 256(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1440(%%rdi), %%zmm0\n\t" "vfmadd231pd 264(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 264(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 264(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1728(%%rdi), %%zmm1\n\t" "vfmadd231pd 272(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 272(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 272(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vfmadd231pd 280(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 280(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 280(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "subq $8352, %%rdi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 288(%%rdx)\n\t" "vmovupd %%zmm31, 576(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $32, %%r12\n\t" "jl 33b\n\t" "movq $15, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $288, %%r15\n\t" "movq $864, %%rax\n\t" "movq $1440, %%rbx\n\t" "movq $2016, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, 
%%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 288(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 576(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 864(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1152(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1440(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1728(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2016(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2304(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, 
%%zmm1, %%zmm22\n\t" "vmovupd 2592(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2880(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3168(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3456(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3744(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4032(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4320(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4608(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4896(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 
128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5184(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5472(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5760(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6048(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6336(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 168(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6624(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 6912(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7200(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7488(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, 
%%zmm27\n\t" "vfmadd231pd 200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 7776(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 8064(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "addq $8352, %%rdi\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 224(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 224(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 224(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 288(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 232(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 232(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 232(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 576(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 240(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 240(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 240(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 864(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 248(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 248(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 248(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1152(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 256(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 256(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 256(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1440(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 264(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 264(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 264(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1728(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 272(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" 
"vfmadd231pd 272(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 272(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vfmadd231pd 280(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 280(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 280(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "subq $8352, %%rdi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 288(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 576(%%rdx)%{%%k1%}\n\t" "addq $32, %%rdx\n\t" "addq $32, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 7776; #endif }
/* (Above: end of the preceding generated kernel; its debug FLOP increment of
 * 7776 = 2*36*3*36 suggests a 36x3 C updated by a 36x36 * 36x3 product.
 * Its body begins before this region.) */

/* kernel_o8_b1: machine-generated LIBXSMM AVX-512 double-precision small-GEMM
 * kernel (see the "LIBXSMM KERNEL COMPILATION ERROR" fallback below), beta = 1
 * variant: the three C columns are loaded (rdx+0/288/576) before the FMAs, so
 * the kernel accumulates C += A * B rather than overwriting C.
 *
 * Register use visible in the body: rdi = A, rsi = B, rdx = C. r15 = 24 is the
 * byte stride between B columns (3 doubles, i.e. K = 3); A and C columns are
 * 288 bytes (36 doubles) apart. Each B scalar is broadcast to all 8 lanes via
 * the %{1to8%} embedded-broadcast FMA form. Main loop "33" processes 8 rows
 * per iteration (A/C pointers advance 64 bytes) until r12 == 32, then a masked
 * tail with k1 = 0xF covers rows 32..35, i.e. 36 rows total. The debug-build
 * FLOP counter adds 648 = 2*36*3*3, consistent with M=36, N=3, K=3.
 *
 * NOTE(review): r13/r14/rax/rbx/r11 are initialized but appear unused in this
 * kernel's visible body (generator artifact), and the clobber list names
 * registers but omits "memory" even though the asm stores through C --
 * presumably tolerated because the asm is __volatile__; confirm against the
 * LIBXSMM generator. */
static void kernel_o8_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 288(%%rdx), %%zmm30\n\t" "vmovupd 576(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 288(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 576(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 288(%%rdx)\n\t" "vmovupd %%zmm31, 576(%%rdx)\n\t" "addq $64, 
%%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $32, %%r12\n\t" "jl 33b\n\t" /* rows 32..35: masked remainder, k1 = 0xF (4 of 8 lanes) */ "movq $15, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 288(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 576(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 288(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 576(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 288(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 576(%%rdx)%{%%k1%}\n\t" "addq $32, %%rdx\n\t" "addq $32, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : 
"k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 648; #endif }
/* kernel_o9_b0: machine-generated LIBXSMM AVX-512 double-precision small-GEMM
 * kernel, beta = 0 variant: the accumulators are zeroed with vpxord (no prior
 * load of C), so the kernel overwrites C = A * B.
 * Register use visible in the body: rdi = A, rsi = B, rdx = C. r15 = 360 is
 * the byte stride between B columns (45 doubles); A and C columns are also
 * 360 bytes apart. The K loop is fully unrolled over 45 B scalars (byte
 * offsets 0..352). Main loop "33" processes 8 rows per iteration until
 * r12 == 40, then a masked tail with k1 = 0x1F covers rows 40..44, i.e. 45
 * rows total. Its debug-build FLOP increment of 12150 = 2*45*3*45 matches
 * M=45, N=3, K=45.
 * NOTE(review): same generator artifacts as kernel_o8_b1 -- unused
 * r13/r14/rax/rbx/r11 initialization and no "memory" clobber; confirm
 * against the LIBXSMM generator. */
static void kernel_o9_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $360, %%r15\n\t" "movq $1080, %%rax\n\t" "movq $1800, %%rbx\n\t" "movq $2520, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 360(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 720(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1080(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, 
%%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1440(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1800(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2160(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2520(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2880(%%rdi), %%zmm0\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3240(%%rdi), %%zmm1\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3600(%%rdi), %%zmm0\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3960(%%rdi), %%zmm1\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4320(%%rdi), %%zmm0\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4680(%%rdi), %%zmm1\n\t" 
"vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5040(%%rdi), %%zmm0\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5400(%%rdi), %%zmm1\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5760(%%rdi), %%zmm0\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6120(%%rdi), %%zmm1\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6480(%%rdi), %%zmm0\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6840(%%rdi), %%zmm1\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7200(%%rdi), %%zmm0\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7560(%%rdi), %%zmm1\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7920(%%rdi), %%zmm0\n\t" "addq $8280, %%rdi\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 
168(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 0(%%rdi), %%zmm1\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 360(%%rdi), %%zmm0\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 720(%%rdi), %%zmm1\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1080(%%rdi), %%zmm0\n\t" "vfmadd231pd 200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1440(%%rdi), %%zmm1\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1800(%%rdi), %%zmm0\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2160(%%rdi), %%zmm1\n\t" "vfmadd231pd 224(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 224(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 224(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2520(%%rdi), %%zmm0\n\t" "vfmadd231pd 232(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 232(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 232(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2880(%%rdi), %%zmm1\n\t" "vfmadd231pd 240(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 240(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 240(%%rsi,%%r15,2)%{1to8%}, 
%%zmm0, %%zmm25\n\t" "vmovupd 3240(%%rdi), %%zmm0\n\t" "vfmadd231pd 248(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 248(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 248(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3600(%%rdi), %%zmm1\n\t" "vfmadd231pd 256(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 256(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 256(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3960(%%rdi), %%zmm0\n\t" "vfmadd231pd 264(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 264(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 264(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4320(%%rdi), %%zmm1\n\t" "vfmadd231pd 272(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 272(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 272(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4680(%%rdi), %%zmm0\n\t" "vfmadd231pd 280(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 280(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 280(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 5040(%%rdi), %%zmm1\n\t" "vfmadd231pd 288(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 288(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 288(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5400(%%rdi), %%zmm0\n\t" "vfmadd231pd 296(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 296(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 296(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5760(%%rdi), %%zmm1\n\t" "vfmadd231pd 304(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 304(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 304(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 6120(%%rdi), %%zmm0\n\t" "vfmadd231pd 312(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 312(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 312(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6480(%%rdi), %%zmm1\n\t" "vfmadd231pd 320(%%rsi)%{1to8%}, 
%%zmm0, %%zmm29\n\t" "vfmadd231pd 320(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 320(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6840(%%rdi), %%zmm0\n\t" "vfmadd231pd 328(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 328(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 328(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 7200(%%rdi), %%zmm1\n\t" "vfmadd231pd 336(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 336(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 336(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7560(%%rdi), %%zmm0\n\t" "vfmadd231pd 344(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 344(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 344(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vfmadd231pd 352(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 352(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 352(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "subq $8280, %%rdi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 360(%%rdx)\n\t" "vmovupd %%zmm31, 720(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $40, %%r12\n\t" "jl 33b\n\t" /* rows 40..44: masked remainder, k1 = 0x1F (5 of 8 lanes) */ "movq $31, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $360, %%r15\n\t" "movq $1080, %%rax\n\t" "movq $1800, %%rbx\n\t" "movq $2520, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, 
%%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 360(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 720(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1080(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1440(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1800(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2160(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2520(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2880(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3240(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" 
"vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3600(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3960(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4320(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4680(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5040(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5400(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5760(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6120(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6480(%%rdi), 
%%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6840(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7200(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7560(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7920(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "addq $8280, %%rdi\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 168(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 0(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 360(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 720(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1080(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 
200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1440(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1800(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2160(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 224(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 224(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 224(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2520(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 232(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 232(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 232(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2880(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 240(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 240(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 240(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3240(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 248(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 248(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 248(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3600(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 256(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 256(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 256(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3960(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 264(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 264(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 264(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4320(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 272(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 272(%%rsi,%%r15,1)%{1to8%}, %%zmm0, 
%%zmm24\n\t" "vfmadd231pd 272(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4680(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 280(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 280(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 280(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 5040(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 288(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 288(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 288(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5400(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 296(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 296(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 296(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5760(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 304(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 304(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 304(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 6120(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 312(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 312(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 312(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6480(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 320(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 320(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 320(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6840(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 328(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 328(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 328(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 7200(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 336(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 336(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 336(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7560(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 344(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 
344(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 344(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vfmadd231pd 352(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 352(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 352(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "subq $8280, %%rdi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 360(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 720(%%rdx)%{%%k1%}\n\t" "addq $40, %%rdx\n\t" "addq $40, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 12150; #endif }
/* kernel_o9_b1: machine-generated LIBXSMM AVX-512 kernel; beta = 1 variant of
 * the 45-row family above: the three C columns (rdx+0/360/720) are loaded
 * before the FMAs, so C += A * B. r15 = 24 here, i.e. B columns are 3
 * doubles apart (K = 3). Structure parallels kernel_o8_b1: 8 rows per "33"
 * loop iteration, then a masked 5-lane tail (k1 = 0x1F) for rows 40..44.
 * The function body continues beyond this point. */
static void kernel_o9_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 360(%%rdx), %%zmm30\n\t" "vmovupd 720(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 360(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 720(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 360(%%rdx)\n\t" "vmovupd %%zmm31, 720(%%rdx)\n\t" "addq $64, 
%%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $40, %%r12\n\t" "jl 33b\n\t" "movq $31, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 360(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 720(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 360(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 720(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 360(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 720(%%rdx)%{%%k1%}\n\t" "addq $40, %%rdx\n\t" "addq $40, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : 
"k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 810; #endif } static void kernel_o10_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $440, %%r15\n\t" "movq $1320, %%rax\n\t" "movq $2200, %%rbx\n\t" "movq $3080, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 440(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 880(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1320(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, 
%%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1760(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2200(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2640(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3080(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3520(%%rdi), %%zmm0\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3960(%%rdi), %%zmm1\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4400(%%rdi), %%zmm0\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4840(%%rdi), %%zmm1\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5280(%%rdi), %%zmm0\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 5720(%%rdi), %%zmm1\n\t" 
"vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6160(%%rdi), %%zmm0\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6600(%%rdi), %%zmm1\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7040(%%rdi), %%zmm0\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7480(%%rdi), %%zmm1\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7920(%%rdi), %%zmm0\n\t" "addq $8360, %%rdi\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 0(%%rdi), %%zmm1\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 440(%%rdi), %%zmm0\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 880(%%rdi), %%zmm1\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1320(%%rdi), %%zmm0\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 
168(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1760(%%rdi), %%zmm1\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2200(%%rdi), %%zmm0\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2640(%%rdi), %%zmm1\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3080(%%rdi), %%zmm0\n\t" "vfmadd231pd 200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3520(%%rdi), %%zmm1\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3960(%%rdi), %%zmm0\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4400(%%rdi), %%zmm1\n\t" "vfmadd231pd 224(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 224(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 224(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4840(%%rdi), %%zmm0\n\t" "vfmadd231pd 232(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 232(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 232(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5280(%%rdi), %%zmm1\n\t" "vfmadd231pd 240(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 240(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 240(%%rsi,%%r15,2)%{1to8%}, 
%%zmm0, %%zmm25\n\t" "vmovupd 5720(%%rdi), %%zmm0\n\t" "vfmadd231pd 248(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 248(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 248(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6160(%%rdi), %%zmm1\n\t" "vfmadd231pd 256(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 256(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 256(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6600(%%rdi), %%zmm0\n\t" "vfmadd231pd 264(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 264(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 264(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 7040(%%rdi), %%zmm1\n\t" "vfmadd231pd 272(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 272(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 272(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7480(%%rdi), %%zmm0\n\t" "vfmadd231pd 280(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 280(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 280(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7920(%%rdi), %%zmm1\n\t" "addq $8360, %%rdi\n\t" "vfmadd231pd 288(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 288(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 288(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vfmadd231pd 296(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 296(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 296(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 440(%%rdi), %%zmm1\n\t" "vfmadd231pd 304(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 304(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 304(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 880(%%rdi), %%zmm0\n\t" "vfmadd231pd 312(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 312(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 312(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1320(%%rdi), %%zmm1\n\t" "vfmadd231pd 
320(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 320(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 320(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1760(%%rdi), %%zmm0\n\t" "vfmadd231pd 328(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 328(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 328(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2200(%%rdi), %%zmm1\n\t" "vfmadd231pd 336(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 336(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 336(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2640(%%rdi), %%zmm0\n\t" "vfmadd231pd 344(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 344(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 344(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3080(%%rdi), %%zmm1\n\t" "vfmadd231pd 352(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 352(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 352(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3520(%%rdi), %%zmm0\n\t" "vfmadd231pd 360(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 360(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 360(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3960(%%rdi), %%zmm1\n\t" "vfmadd231pd 368(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 368(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 368(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4400(%%rdi), %%zmm0\n\t" "vfmadd231pd 376(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 376(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 376(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4840(%%rdi), %%zmm1\n\t" "vfmadd231pd 384(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 384(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 384(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5280(%%rdi), %%zmm0\n\t" "vfmadd231pd 392(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 392(%%rsi,%%r15,1)%{1to8%}, %%zmm1, 
%%zmm27\n\t" "vfmadd231pd 392(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5720(%%rdi), %%zmm1\n\t" "vfmadd231pd 400(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 400(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 400(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 6160(%%rdi), %%zmm0\n\t" "vfmadd231pd 408(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 408(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 408(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6600(%%rdi), %%zmm1\n\t" "vfmadd231pd 416(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 416(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 416(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7040(%%rdi), %%zmm0\n\t" "vfmadd231pd 424(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 424(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 424(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 432(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 432(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 432(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "subq $16720, %%rdi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 440(%%rdx)\n\t" "vmovupd %%zmm31, 880(%%rdx)\n\t" "addq $64, %%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $48, %%r12\n\t" "jl 33b\n\t" "movq $127, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $440, %%r15\n\t" "movq $1320, %%rax\n\t" "movq $2200, %%rbx\n\t" "movq $3080, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord 
%%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 440(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 880(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1320(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 1760(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2200(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 2640(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3080(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3520(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, 
%%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3960(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 64(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 64(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 64(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4400(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 72(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 72(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 72(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4840(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 80(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 80(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 80(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5280(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 88(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 88(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 88(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 5720(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 96(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 96(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 96(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6160(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 104(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 104(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 104(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 6600(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 112(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 112(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 112(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7040(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 120(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 120(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 120(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7480(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 128(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 
128(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 128(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7920(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "addq $8360, %%rdi\n\t" "vfmadd231pd 136(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 136(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 136(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 0(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 144(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 144(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 144(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 440(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 152(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 152(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 152(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 880(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 160(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 160(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 160(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1320(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 168(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 168(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 168(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1760(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 176(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 176(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 176(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2200(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 184(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 184(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 184(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2640(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 192(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 192(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 192(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3080(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 
200(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 200(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 200(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3520(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 208(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 208(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 208(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 3960(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 216(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 216(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 216(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4400(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 224(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 224(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 224(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 4840(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 232(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 232(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 232(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5280(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 240(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 240(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 240(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 5720(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 248(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 248(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 248(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6160(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 256(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 256(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 256(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 6600(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 264(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 264(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 264(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 7040(%%rdi), 
%%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 272(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 272(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 272(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 7480(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 280(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 280(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 280(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 7920(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "addq $8360, %%rdi\n\t" "vfmadd231pd 288(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 288(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 288(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 296(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 296(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 296(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 440(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 304(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 304(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 304(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 880(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 312(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 312(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 312(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 1320(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 320(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 320(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 320(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1760(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 328(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 328(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 328(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 2200(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 336(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 336(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 
336(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2640(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 344(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 344(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 344(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3080(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 352(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 352(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 352(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3520(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 360(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 360(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 360(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3960(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 368(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 368(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 368(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 4400(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 376(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 376(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 376(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 4840(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 384(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 384(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 384(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 5280(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 392(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 392(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 392(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 5720(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 400(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 400(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 400(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 6160(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 408(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 408(%%rsi,%%r15,1)%{1to8%}, %%zmm1, 
%%zmm21\n\t" "vfmadd231pd 408(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 6600(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 416(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 416(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 416(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 7040(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 424(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 424(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 424(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 432(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 432(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 432(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "subq $16720, %%rdi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 440(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 880(%%rdx)%{%%k1%}\n\t" "addq $56, %%rdx\n\t" "addq $56, %%rdi\n\t" : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif /* debug-only flop accounting for the preceding kernel (its definition starts above this excerpt) */ #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 18150; #endif } /* kernel_o10_b1: auto-generated LIBXSMM AVX-512 double-precision micro-kernel. Accumulates three output panels of C (zmm29-zmm31, stored at byte offsets 0/440/880 from %rdx); the _b1 suffix means existing C values are loaded and accumulated (beta=1), unlike the _b0 variants which zero their accumulators. NOTE(review): machine-generated code -- do not hand-edit; regenerate instead. */ static void kernel_o10_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ /* rdi=A, rsi=B, rdx=C; FMAs broadcast B scalars ({1to8}) against 8-wide vectors loaded from A; the main loop advances 64 bytes per iteration (cmpq $48 bound), then a masked tail (k1 = 0x7f, 7 lanes) handles the 56-byte remainder. */ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 440(%%rdx), %%zmm30\n\t" "vmovupd 880(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 440(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 880(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 440(%%rdx)\n\t" "vmovupd %%zmm31, 880(%%rdx)\n\t" "addq $64, 
%%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $48, %%r12\n\t" "jl 33b\n\t" "movq $127, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 440(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 880(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 440(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 880(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 440(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 880(%%rdx)%{%%k1%}\n\t" "addq $56, %%rdx\n\t" "addq $56, %%rdi\n\t" /* no outputs; A, B, C passed as "m" inputs. NOTE(review): the asm stores through C but the clobber list (below) has no "memory" clobber; only __volatile__ keeps it ordered with surrounding code -- verify against callers. */ : : "m"(A), "m"(B), "m"(C) : 
"k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif /* debug-only flop accounting (atomic under OpenMP) */ #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 990; #endif } /* kernel_o11_b0: auto-generated LIBXSMM AVX-512 double-precision micro-kernel. Accumulates three output panels of C (zmm29-zmm31, stored at byte offsets 0/528/1056 from %rdx); the _b0 suffix means accumulators start from zero (vpxord) and overwrite C. NOTE(review): machine-generated code -- do not hand-edit; regenerate instead. */ static void kernel_o11_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ /* rdi=A, rsi=B, rdx=C; inner loop (label 34) broadcasts B scalars ({1to8}) against 8-wide A vectors with 8-way unrolling, outer loop (label 33, bound cmpq $64) walks output columns; a masked epilogue (k1 = 0x3, 2 lanes) handles the remainder. */ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $0, %%r14\n\t" "34:\n\t" "addq $8, %%r14\n\t" "movq $528, %%r15\n\t" "movq $1584, %%rax\n\t" "movq $2640, %%rbx\n\t" "movq $3696, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 528(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1056(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1584(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" 
"vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2112(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2640(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3168(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3696(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "addq $4224, %%rdi\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "addq $64, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "cmpq $64, %%r14\n\t" "jl 34b\n\t" "movq $528, %%r15\n\t" "movq $1584, %%rax\n\t" "movq $2640, %%rbx\n\t" "movq $3696, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, 
%%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 528(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "addq $1056, %%rdi\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "addq $16, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "subq $528, %%rsi\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 528(%%rdx)\n\t" "vmovupd %%zmm31, 1056(%%rdx)\n\t" "addq $64, %%rdx\n\t" "subq $34784, %%rdi\n\t" "cmpq $64, %%r12\n\t" "jl 33b\n\t" "movq $3, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r14\n\t" "movq $528, %%r15\n\t" "movq $1584, %%rax\n\t" "movq $2640, %%rbx\n\t" "movq $3696, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 528(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1056(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" 
"vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1584(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2112(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 2640(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3168(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 3696(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "addq $4224, %%rdi\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "addq $64, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "cmpq $64, %%r14\n\t" "jl 33b\n\t" "movq $528, %%r15\n\t" "movq $1584, %%rax\n\t" "movq $2640, %%rbx\n\t" "movq $3696, %%r11\n\t" "vpxord 
%%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 528(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "addq $1056, %%rdi\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "addq $16, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "subq $528, %%rsi\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 528(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 1056(%%rdx)%{%%k1%}\n\t" "addq $16, %%rdx\n\t" "subq $34832, %%rdi\n\t" /* no outputs; A, B, C passed as "m" inputs. NOTE(review): stores through C with no "memory" clobber; ordering relies on __volatile__ alone -- verify against callers. */ : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif /* debug-only flop accounting (atomic under OpenMP) */ #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 26136; #endif } /* kernel_o11_b1: auto-generated LIBXSMM AVX-512 double-precision micro-kernel. Accumulates three output panels of C (zmm29-zmm31, stored at byte offsets 0/528/1056 from %rdx); the _b1 suffix means existing C values are loaded and accumulated (beta=1). NOTE(review): machine-generated code -- do not hand-edit; regenerate instead. */ static void kernel_o11_b1(const double* A, const double* B, double* C) { #ifdef __AVX512F__ /* rdi=A, rsi=B, rdx=C; FMAs broadcast B scalars ({1to8}) against 8-wide A vectors; main loop bound cmpq $64, then a masked epilogue (k1 = 0x3, 2 lanes) handles the remainder. */ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vmovupd 0(%%rdx), %%zmm29\n\t" "vmovupd 528(%%rdx), %%zmm30\n\t" "vmovupd 1056(%%rdx), %%zmm31\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 528(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1056(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 528(%%rdx)\n\t" "vmovupd %%zmm31, 1056(%%rdx)\n\t" "addq $64, 
%%rdx\n\t" "addq $64, %%rdi\n\t" "cmpq $64, %%r12\n\t" "jl 33b\n\t" "movq $3, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t" "vmovupd 528(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t" "vmovupd 1056(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t" "movq $24, %%r15\n\t" "movq $72, %%rax\n\t" "movq $120, %%rbx\n\t" "movq $168, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 528(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1056(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 528(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 1056(%%rdx)%{%%k1%}\n\t" "addq $16, %%rdx\n\t" "addq $16, %%rdi\n\t" /* no outputs; A, B, C passed as "m" inputs. NOTE(review): stores through C with no "memory" clobber; ordering relies on __volatile__ alone -- verify against callers. */ : : "m"(A), "m"(B), "m"(C) : 
"k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? #endif /* debug-only flop accounting (atomic under OpenMP) */ #ifndef NDEBUG #ifdef _OPENMP #pragma omp atomic #endif libxsmm_num_total_flops += 1188; #endif } /* kernel_o12_b0: auto-generated LIBXSMM AVX-512 double-precision micro-kernel. Accumulates three output panels of C (zmm29-zmm31, stored at byte offsets 0/624/1248 from %rdx); the _b0 suffix means accumulators start from zero (vpxord) and overwrite C. NOTE(review): machine-generated code -- do not hand-edit; regenerate instead. */ static void kernel_o12_b0(const double* A, const double* B, double* C) { #ifdef __AVX512F__ /* rdi=A, rsi=B, rdx=C; inner loop (label 34) broadcasts B scalars ({1to8}) against 8-wide A vectors with 8-way unrolling, outer loop (label 33, bound cmpq $72) walks output columns; a masked epilogue (k1 = 0x3f, 6 lanes) handles the remainder. */ __asm__ __volatile__("movq %0, %%rdi\n\t" "movq %1, %%rsi\n\t" "movq %2, %%rdx\n\t" "movq $0, %%r12\n\t" "movq $0, %%r13\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r12\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, %%zmm31, %%zmm31\n\t" "movq $0, %%r14\n\t" "34:\n\t" "addq $8, %%r14\n\t" "movq $624, %%r15\n\t" "movq $1872, %%rax\n\t" "movq $3120, %%rbx\n\t" "movq $4368, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 624(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1248(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1872(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" 
"vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2496(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3120(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3744(%%rdi), %%zmm0\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4368(%%rdi), %%zmm1\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "addq $4992, %%rdi\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "addq $64, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "cmpq $72, %%r14\n\t" "jl 34b\n\t" "movq $624, %%r15\n\t" "movq $1872, %%rax\n\t" "movq $3120, %%rbx\n\t" "movq $4368, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, 
%%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0\n\t" "vmovupd 624(%%rdi), %%zmm1\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1248(%%rdi), %%zmm0\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1872(%%rdi), %%zmm1\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2496(%%rdi), %%zmm0\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3120(%%rdi), %%zmm1\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "addq $3744, %%rdi\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "addq $48, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "subq $624, %%rsi\n\t" "vmovupd %%zmm29, 0(%%rdx)\n\t" "vmovupd %%zmm30, 624(%%rdx)\n\t" "vmovupd %%zmm31, 1248(%%rdx)\n\t" "addq $64, %%rdx\n\t" "subq $48608, %%rdi\n\t" "cmpq $72, %%r12\n\t" "jl 33b\n\t" "movq $63, %%r9\n\t" "kmovw %%r9d, %%k1\n\t" "vpxord %%zmm29, %%zmm29, %%zmm29\n\t" "vpxord %%zmm30, %%zmm30, %%zmm30\n\t" "vpxord %%zmm31, 
%%zmm31, %%zmm31\n\t" "movq $0, %%r14\n\t" "33:\n\t" "addq $8, %%r14\n\t" "movq $624, %%r15\n\t" "movq $1872, %%rax\n\t" "movq $3120, %%rbx\n\t" "movq $4368, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 624(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1248(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1872(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2496(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3120(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 3744(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 4368(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 48(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" 
"vfmadd231pd 48(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 48(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "addq $4992, %%rdi\n\t" "vfmadd231pd 56(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 56(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 56(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "addq $64, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "cmpq $72, %%r14\n\t" "jl 33b\n\t" "movq $624, %%r15\n\t" "movq $1872, %%rax\n\t" "movq $3120, %%rbx\n\t" "movq $4368, %%r11\n\t" "vpxord %%zmm26, %%zmm26, %%zmm26\n\t" "vpxord %%zmm27, %%zmm27, %%zmm27\n\t" "vpxord %%zmm28, %%zmm28, %%zmm28\n\t" "vpxord %%zmm23, %%zmm23, %%zmm23\n\t" "vpxord %%zmm24, %%zmm24, %%zmm24\n\t" "vpxord %%zmm25, %%zmm25, %%zmm25\n\t" "vpxord %%zmm20, %%zmm20, %%zmm20\n\t" "vpxord %%zmm21, %%zmm21, %%zmm21\n\t" "vpxord %%zmm22, %%zmm22, %%zmm22\n\t" "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vmovupd 624(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "vmovupd 1248(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "vmovupd 1872(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t" "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t" "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t" "vmovupd 2496(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t" "vfmadd231pd 24(%%rsi)%{1to8%}, %%zmm1, %%zmm20\n\t" "vfmadd231pd 
24(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm21\n\t" "vfmadd231pd 24(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm22\n\t" "vmovupd 3120(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t" "vfmadd231pd 32(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t" "vfmadd231pd 32(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t" "vfmadd231pd 32(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t" "addq $3744, %%rdi\n\t" "vfmadd231pd 40(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t" "vfmadd231pd 40(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t" "vfmadd231pd 40(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t" "addq $48, %%rsi\n\t" "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t" "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t" "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t" "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t" "subq $624, %%rsi\n\t" "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm30, 624(%%rdx)%{%%k1%}\n\t" "vmovupd %%zmm31, 1248(%%rdx)%{%%k1%}\n\t" "addq $48, %%rdx\n\t" "subq $48624, %%rdi\n\t" /* no outputs; A, B, C passed as "m" inputs. NOTE(review): stores through C with no "memory" clobber; ordering relies on __volatile__ alone -- verify against callers. */ : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31"); #else #pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__) #error No kernel was compiled, lacking support for current architecture? 
#endif
/* Tail of the preceding generated kernel: optional FLOP accounting,
   compiled out in NDEBUG builds.  36504 = 2*78*3*78 flops, i.e. an
   M=78, N=3, K=78 double-precision GEMM block.  */
#ifndef NDEBUG
#ifdef _OPENMP
#pragma omp atomic
#endif
libxsmm_num_total_flops += 36504;
#endif
}

/* Generated (libxsmm-style) AVX-512 double-precision micro-kernel:
   C(0:77,0:2) += A(0:77,0:2) * B(0:2,0:2), with leading dimensions of
   78 doubles (624 bytes) for A and C.  K = 3 broadcasts are taken from
   B at byte offsets 0/8/16 with a column stride of 24 bytes (%r15).
   The main loop processes 9 blocks of 8 rows (r12 counts to 72); the
   remaining 6 rows are handled with mask k1 = 63 (movq $63 -> kmovw).
   1404 = 2*78*3*3 flops.
   NOTE(review): registers rax/rbx/r11/r13/r14 are initialised but appear
   unused in this particular instantiation — presumably left over from the
   generator's template; confirm against the libxsmm source generator.  */
static void kernel_o12_b1(const double* A, const double* B, double* C) {
#ifdef __AVX512F__
__asm__ __volatile__("movq %0, %%rdi\n\t"
                     "movq %1, %%rsi\n\t"
                     "movq %2, %%rdx\n\t"
                     "movq $0, %%r12\n\t"
                     "movq $0, %%r13\n\t"
                     "movq $0, %%r14\n\t"
                     /* main loop: 8 rows of C per iteration, 9 iterations */
                     "33:\n\t"
                     "addq $8, %%r12\n\t"
                     "vmovupd 0(%%rdx), %%zmm29\n\t"
                     "vmovupd 624(%%rdx), %%zmm30\n\t"
                     "vmovupd 1248(%%rdx), %%zmm31\n\t"
                     "movq $24, %%r15\n\t"
                     "movq $72, %%rax\n\t"
                     "movq $120, %%rbx\n\t"
                     "movq $168, %%r11\n\t"
                     "vpxord %%zmm26, %%zmm26, %%zmm26\n\t"
                     "vpxord %%zmm27, %%zmm27, %%zmm27\n\t"
                     "vpxord %%zmm28, %%zmm28, %%zmm28\n\t"
                     "vpxord %%zmm23, %%zmm23, %%zmm23\n\t"
                     "vpxord %%zmm24, %%zmm24, %%zmm24\n\t"
                     "vpxord %%zmm25, %%zmm25, %%zmm25\n\t"
                     "vpxord %%zmm20, %%zmm20, %%zmm20\n\t"
                     "vpxord %%zmm21, %%zmm21, %%zmm21\n\t"
                     "vpxord %%zmm22, %%zmm22, %%zmm22\n\t"
                     "vmovupd 0(%%rdi), %%zmm0\n\t"
                     "vmovupd 624(%%rdi), %%zmm1\n\t"
                     "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t"
                     "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t"
                     "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t"
                     "vmovupd 1248(%%rdi), %%zmm0\n\t"
                     "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t"
                     "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t"
                     "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t"
                     "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t"
                     "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t"
                     "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t"
                     /* reduce the per-k partial accumulators into zmm29-31 */
                     "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t"
                     "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t"
                     "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t"
                     "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t"
                     "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t"
                     "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t"
                     "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t"
                     "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t"
                     "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t"
                     "vmovupd %%zmm29, 0(%%rdx)\n\t"
                     "vmovupd %%zmm30, 624(%%rdx)\n\t"
                     "vmovupd %%zmm31, 1248(%%rdx)\n\t"
                     "addq $64, %%rdx\n\t"
                     "addq $64, %%rdi\n\t"
                     "cmpq $72, %%r12\n\t"
                     "jl 33b\n\t"
                     /* tail: last 6 rows under write mask k1 = 0b111111 */
                     "movq $63, %%r9\n\t"
                     "kmovw %%r9d, %%k1\n\t"
                     "vmovupd 0(%%rdx), %%zmm29%{%%k1%}%{z%}\n\t"
                     "vmovupd 624(%%rdx), %%zmm30%{%%k1%}%{z%}\n\t"
                     "vmovupd 1248(%%rdx), %%zmm31%{%%k1%}%{z%}\n\t"
                     "movq $24, %%r15\n\t"
                     "movq $72, %%rax\n\t"
                     "movq $120, %%rbx\n\t"
                     "movq $168, %%r11\n\t"
                     "vpxord %%zmm26, %%zmm26, %%zmm26\n\t"
                     "vpxord %%zmm27, %%zmm27, %%zmm27\n\t"
                     "vpxord %%zmm28, %%zmm28, %%zmm28\n\t"
                     "vpxord %%zmm23, %%zmm23, %%zmm23\n\t"
                     "vpxord %%zmm24, %%zmm24, %%zmm24\n\t"
                     "vpxord %%zmm25, %%zmm25, %%zmm25\n\t"
                     "vpxord %%zmm20, %%zmm20, %%zmm20\n\t"
                     "vpxord %%zmm21, %%zmm21, %%zmm21\n\t"
                     "vpxord %%zmm22, %%zmm22, %%zmm22\n\t"
                     "vmovupd 0(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t"
                     "vmovupd 624(%%rdi), %%zmm1%{%%k1%}%{z%}\n\t"
                     "vfmadd231pd 0(%%rsi)%{1to8%}, %%zmm0, %%zmm29\n\t"
                     "vfmadd231pd 0(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm30\n\t"
                     "vfmadd231pd 0(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm31\n\t"
                     "vmovupd 1248(%%rdi), %%zmm0%{%%k1%}%{z%}\n\t"
                     "vfmadd231pd 8(%%rsi)%{1to8%}, %%zmm1, %%zmm26\n\t"
                     "vfmadd231pd 8(%%rsi,%%r15,1)%{1to8%}, %%zmm1, %%zmm27\n\t"
                     "vfmadd231pd 8(%%rsi,%%r15,2)%{1to8%}, %%zmm1, %%zmm28\n\t"
                     "vfmadd231pd 16(%%rsi)%{1to8%}, %%zmm0, %%zmm23\n\t"
                     "vfmadd231pd 16(%%rsi,%%r15,1)%{1to8%}, %%zmm0, %%zmm24\n\t"
                     "vfmadd231pd 16(%%rsi,%%r15,2)%{1to8%}, %%zmm0, %%zmm25\n\t"
                     "vaddpd %%zmm26, %%zmm29, %%zmm29\n\t"
                     "vaddpd %%zmm27, %%zmm30, %%zmm30\n\t"
                     "vaddpd %%zmm28, %%zmm31, %%zmm31\n\t"
                     "vaddpd %%zmm23, %%zmm29, %%zmm29\n\t"
                     "vaddpd %%zmm24, %%zmm30, %%zmm30\n\t"
                     "vaddpd %%zmm25, %%zmm31, %%zmm31\n\t"
                     "vaddpd %%zmm20, %%zmm29, %%zmm29\n\t"
                     "vaddpd %%zmm21, %%zmm30, %%zmm30\n\t"
                     "vaddpd %%zmm22, %%zmm31, %%zmm31\n\t"
                     "vmovupd %%zmm29, 0(%%rdx)%{%%k1%}\n\t"
                     "vmovupd %%zmm30, 624(%%rdx)%{%%k1%}\n\t"
                     "vmovupd %%zmm31, 1248(%%rdx)%{%%k1%}\n\t"
                     "addq $48, %%rdx\n\t"
                     "addq $48, %%rdi\n\t"
                     : : "m"(A), "m"(B), "m"(C) : "k1","rax","rbx","rcx","rdx","rdi","rsi","r8","r9","r10","r11","r12","r13","r14","r15","zmm0","zmm1","zmm2","zmm3","zmm4","zmm5","zmm6","zmm7","zmm8","zmm9","zmm10","zmm11","zmm12","zmm13","zmm14","zmm15","zmm16","zmm17","zmm18","zmm19","zmm20","zmm21","zmm22","zmm23","zmm24","zmm25","zmm26","zmm27","zmm28","zmm29","zmm30","zmm31");
#else
#pragma message ("LIBXSMM KERNEL COMPILATION ERROR in: " __FILE__)
#error No kernel was compiled, lacking support for current architecture?
#endif
/* Optional FLOP accounting (debug builds only): 1404 = 2*78*3*3.  */
#ifndef NDEBUG
#ifdef _OPENMP
#pragma omp atomic
#endif
libxsmm_num_total_flops += 1404;
#endif
}

/* Fallback/default entry: dispatch to the first kernel variant.  */
static void kernel_nq(const double* A, const double* B, double* C) {
kernel_o2_b0(A, B, C);
}

/* Dispatch table over all generated kernel variants, ordered by
   (o, b) suffix pairs; indexing scheme is defined by the caller.  */
static void (* const microkernels[])(const double*, const double*, double*) = {
kernel_o2_b0, kernel_o2_b1, kernel_o3_b0, kernel_o3_b1,
kernel_o4_b0, kernel_o4_b1, kernel_o5_b0, kernel_o5_b1,
kernel_o6_b0, kernel_o6_b1, kernel_o7_b0, kernel_o7_b1,
kernel_o8_b0, kernel_o8_b1, kernel_o9_b0, kernel_o9_b1,
kernel_o10_b0, kernel_o10_b1, kernel_o11_b0, kernel_o11_b1,
kernel_o12_b0, kernel_o12_b1 };

#endif // MICROKERNELS_H_
mpi_openmp_convergence.c
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define NXPROB 1280     /* x dimension of problem grid */
#define NYPROB 1024     /* y dimension of problem grid */
#define STEPS  500      /* number of time steps */
#define NTAG 0          /* north message tag */
#define STAG 1          /* south message tag */
#define WTAG 2          /* west message tag */
#define ETAG 3          /* east message tag */

/* Diffusion coefficients of the explicit heat-equation stencil. */
struct Parms {
    float cx;
    float cy;
} parms = {0.1, 0.1};

/*
 * Apply one Jacobi step of the 2-D heat stencil on the inclusive
 * sub-rectangle [start_x..end_x] x [start_y..end_y], reading from u1 and
 * writing to u2.  ny is the row stride (allocated row length) of both grids.
 *
 * Fixed: the original "#pragma parallel omp" is not a valid OpenMP directive
 * and was silently ignored, so the loops always ran serially (and, had it
 * been spelled correctly, the shared loop counters would have raced).
 */
static inline void update(int start_x, int end_x, int start_y, int end_y,
                          int ny, float *u1, float *u2)
{
    int b, c;
#pragma omp parallel for num_threads(4) schedule(static) private(c)
    for (b = start_x; b <= end_x; b++) {
        for (c = start_y; c <= end_y; c++) {
            *(u2 + b*ny + c) = *(u1 + b*ny + c)
                + parms.cx * (*(u1 + (b+1)*ny + c) + *(u1 + (b-1)*ny + c)
                              - 2.0 * *(u1 + b*ny + c))
                + parms.cy * (*(u1 + b*ny + c + 1) + *(u1 + b*ny + c - 1)
                              - 2.0 * *(u1 + b*ny + c));
        }
    }
}

/* Fill an nx-by-ny grid (including halo cells, as called below) with the
   standard bump-shaped initial condition. */
static inline void inidat(int nx, int ny, float *u)
{
    int b, c;
    for (b = 0; b <= nx-1; b++)
        for (c = 0; c <= ny-1; c++)
            *(u + b*ny + c) = (float)(b * (nx - b - 1) * c * (ny - c - 1));
}

/* Dump an nx-by-ny grid to file fnam, one grid row per output column.
   Fixed: fopen result is now checked before use. */
static inline void prtdat(int nx, int ny, float *u1, char *fnam)
{
    int b, c;
    FILE *fp = fopen(fnam, "w");
    if (fp == NULL) {
        fprintf(stderr, "prtdat: cannot open %s\n", fnam);
        return;
    }
    for (c = ny-1; c >= 0; c--) {
        for (b = 0; b <= nx-1; b++) {
            fprintf(fp, "%6.1f", *(u1 + b*ny + c));
            if (b != nx-1)
                fprintf(fp, " ");
            else
                fprintf(fp, "\n");
        }
    }
    fclose(fp);
}

/*
 * Return 1 when every interior point of the two grid copies differs by at
 * most 5 percent relative to copy u[1], 0 otherwise.
 *
 * a_bound/b_bound are the FULL local grid dimensions including halos; only
 * interior points [1..a_bound-2] x [1..b_bound-2] are examined.
 *
 * Fixed: (a) the private clause misspelled "converg_i" as "vongerg_i"
 * (a compile error with -fopenmp); (b) the caller previously passed x-2/y-2,
 * which retyped the VLA with the wrong row stride so every access after the
 * first row read the wrong element.
 *
 * NOTE(review): a zero value in u[1] yields an inf/nan relative difference,
 * which compares as "not converged" — confirm that is the intended policy.
 */
static inline int diff_between_elements(int a_bound, int b_bound,
                                        float u[2][a_bound][b_bound])
{
    int converg_i, converg_j;
    int result = 1;
#pragma omp parallel for private(converg_j) reduction(&:result)
    for (converg_i = 1; converg_i < a_bound-1; converg_i++) {
        int local_result = 1;
        for (converg_j = 1; converg_j < b_bound-1; converg_j++) {
            local_result &= (fabs(u[0][converg_i][converg_j]
                                  - u[1][converg_i][converg_j])
                             * 100 / u[1][converg_i][converg_j]) <= 5;
        }
        result &= local_result;
    }
    return result;
}

int main(int argc, char *argv[])
{
    int taskid,                 /* this task's unique id */
        numtasks,               /* number of tasks */
        north, south, west, east, /* neighbor tasks */
        rc = 1,                 /* abort error code (non-zero) */
        i, x, y, new_old, send_convergence, receive_convergence,
        dimensions[2], periods[2], data_dimensions[2],
        a, b, c;                /* dimension variables */
    MPI_Datatype row, col;      /* halo-exchange datatypes */
    MPI_Comm cartesian;         /* cartesian communicator */
    double start, end, time;    /* timers */
    MPI_Request north_receive[2], south_receive[2], west_receive[2], east_receive[2],
                north_send[2], south_send[2], west_send[2], east_send[2];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    if ((NYPROB * NXPROB) % numtasks != 0) {
        MPI_Abort(MPI_COMM_WORLD, rc);
        exit(-1);
    }

    /* Build a 2-D cartesian process grid and locate the four neighbors. */
    dimensions[0] = dimensions[1] = 0;
    MPI_Dims_create(numtasks, 2, dimensions);
    periods[0] = periods[1] = 0;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, periods, 1, &cartesian);
    MPI_Barrier(cartesian);     /* wait for all */
    MPI_Cart_shift(cartesian, 0, 1, &north, &south);
    MPI_Cart_shift(cartesian, 1, 1, &west, &east);

    /* Local tile dimensions, +2 halo rows/columns per direction.
       Fixed: y was derived from data_dimensions[0]; correct index is [1]
       (the original only worked for square tiles). */
    data_dimensions[0] = data_dimensions[1] = 0;
    MPI_Dims_create(NXPROB * NYPROB / numtasks, 2, data_dimensions);
    x = data_dimensions[0] + 2;
    y = data_dimensions[1] + 2;

    float (*data)[x][y] = calloc(2, sizeof *data);
    if (data == NULL) {
        MPI_Abort(MPI_COMM_WORLD, rc);
        exit(-1);
    }
    /* Explicit zero-init kept deliberately: calloc's all-bits-zero is 0.0f
       on IEEE targets, but this is portable and cheap. */
    for (a = 0; a < 2; a++)
        for (b = 0; b < x; b++)
            for (c = 0; c < y; c++)
                data[a][b][c] = 0.0;
    inidat(x, y, &data[0][0][0]);
    MPI_Barrier(cartesian);     /* wait for all */

    /* A grid row is y contiguous floats; a grid column is x floats with
       stride y.  Fixed: the original swapped x and y in both datatypes. */
    MPI_Type_contiguous(y, MPI_FLOAT, &row);
    MPI_Type_commit(&row);
    MPI_Type_vector(x, 1, y, MPI_FLOAT, &col);
    MPI_Type_commit(&col);

    /* Persistent halo-exchange requests for both grid copies, since we
       alternate between data[0] and data[1] every step. */
    MPI_Send_init(&data[0][1][0],   1, row, north, STAG, cartesian, &north_send[0]);
    MPI_Recv_init(&data[0][0][0],   1, row, north, NTAG, cartesian, &north_receive[0]);
    MPI_Send_init(&data[0][x-2][0], 1, row, south, NTAG, cartesian, &south_send[0]);
    MPI_Recv_init(&data[0][x-1][0], 1, row, south, STAG, cartesian, &south_receive[0]);
    MPI_Send_init(&data[0][0][1],   1, col, west,  ETAG, cartesian, &west_send[0]);
    MPI_Recv_init(&data[0][0][0],   1, col, west,  WTAG, cartesian, &west_receive[0]);
    MPI_Send_init(&data[0][0][y-2], 1, col, east,  WTAG, cartesian, &east_send[0]);
    MPI_Recv_init(&data[0][0][y-1], 1, col, east,  ETAG, cartesian, &east_receive[0]);
    MPI_Send_init(&data[1][1][0],   1, row, north, STAG, cartesian, &north_send[1]);
    MPI_Recv_init(&data[1][0][0],   1, row, north, NTAG, cartesian, &north_receive[1]);
    MPI_Send_init(&data[1][x-2][0], 1, row, south, NTAG, cartesian, &south_send[1]);
    MPI_Recv_init(&data[1][x-1][0], 1, row, south, STAG, cartesian, &south_receive[1]);
    MPI_Send_init(&data[1][0][1],   1, col, west,  ETAG, cartesian, &west_send[1]);
    MPI_Recv_init(&data[1][0][0],   1, col, west,  WTAG, cartesian, &west_receive[1]);
    MPI_Send_init(&data[1][0][y-2], 1, col, east,  WTAG, cartesian, &east_send[1]);
    MPI_Recv_init(&data[1][0][y-1], 1, col, east,  ETAG, cartesian, &east_receive[1]);

    start = MPI_Wtime();        /* start time */
    new_old = 0;
    for (i = 0; i <= STEPS; i++) {
        /* Kick off the halo exchange, then overlap it with the update of
           the interior points that do not need halo data.
           Fixed: all update() calls now pass y (the real row stride)
           instead of x. */
        MPI_Start(&north_send[new_old]);
        MPI_Start(&south_send[new_old]);
        MPI_Start(&west_send[new_old]);
        MPI_Start(&east_send[new_old]);
        MPI_Start(&north_receive[new_old]);
        MPI_Start(&south_receive[new_old]);
        MPI_Start(&west_receive[new_old]);
        MPI_Start(&east_receive[new_old]);

        update(2, x-3, 2, y-3, y, &data[new_old][0][0], &data[1-new_old][0][0]);

        /* Wait for the halos, then update the border points that need them. */
        MPI_Wait(&east_receive[new_old],  MPI_STATUS_IGNORE);
        MPI_Wait(&west_receive[new_old],  MPI_STATUS_IGNORE);
        MPI_Wait(&north_receive[new_old], MPI_STATUS_IGNORE);
        MPI_Wait(&south_receive[new_old], MPI_STATUS_IGNORE);

        update(1, x-2, 1,   1,   y, &data[new_old][0][0], &data[1-new_old][0][0]);
        update(1, x-2, y-2, y-2, y, &data[new_old][0][0], &data[1-new_old][0][0]);
        update(1, 1,   1,   y-2, y, &data[new_old][0][0], &data[1-new_old][0][0]);
        update(x-2, x-2, 1, y-2, y, &data[new_old][0][0], &data[1-new_old][0][0]);

        /* The send buffers may be reused only after the sends complete. */
        MPI_Wait(&east_send[new_old],  MPI_STATUS_IGNORE);
        MPI_Wait(&west_send[new_old],  MPI_STATUS_IGNORE);
        MPI_Wait(&north_send[new_old], MPI_STATUS_IGNORE);
        MPI_Wait(&south_send[new_old], MPI_STATUS_IGNORE);

        /* Swap the roles of the two grid copies for the next iteration. */
        new_old = 1 - new_old;

        /* Check convergence every 5 steps.
           Fixed: pass the full halo-inclusive dimensions so the VLA
           parameter matches the actual allocation. */
        if (i % 5 == 0) {
            send_convergence = diff_between_elements(x, y, data);
            MPI_Allreduce(&send_convergence, &receive_convergence, 1,
                          MPI_INT, MPI_LAND, cartesian);
            if (receive_convergence == 1) { /* values no longer differ significantly */
                break;
            }
        }
    }
    end = MPI_Wtime() - start;
    MPI_Reduce(&end, &time, 1, MPI_DOUBLE, MPI_MAX, 0, cartesian); /* get time */
    if (taskid == 0) {
        printf("Time: %f\n", time);
    }

    /* Release persistent requests (all inactive here), datatypes and memory. */
    for (i = 0; i < 2; i++) {
        MPI_Request_free(&north_send[i]);
        MPI_Request_free(&north_receive[i]);
        MPI_Request_free(&south_send[i]);
        MPI_Request_free(&south_receive[i]);
        MPI_Request_free(&west_send[i]);
        MPI_Request_free(&west_receive[i]);
        MPI_Request_free(&east_send[i]);
        MPI_Request_free(&east_receive[i]);
    }
    MPI_Type_free(&row);
    MPI_Type_free(&col);
    free(data);
    MPI_Finalize();
    return 0;
}
tree-vectorizer.h
/* Vectorizer
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"

/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};

/* Defines type of operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};

/* Define type of available alignment support.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};

/* Define type of def-use cross-iteration cycle.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};

/* Define type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION,

  /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     to implement:

       for (int i = 0; i < VF; ++i)
         res = cond[i] ? val[i] : res;  */
  EXTRACT_LAST_REDUCTION,

  /* Use a folding reduction within the loop to implement:

       for (int i = 0; i < VF; ++i)
         res = res OP val[i];

     (with no reassociation).  */
  FOLD_LEFT_REDUCTION
};

#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))

/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  int count;
  enum vect_cost_for_stmt kind;
  gimple *stmt;
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;

/* Maps base addresses to an innermost_loop_behavior that gives the maximum
   known alignment for that base.  */
typedef hash_map<tree_operand_hash,
                 innermost_loop_behavior *> vec_base_alignments;

/************************************************************************
  SLP
 ************************************************************************/
typedef struct _slp_tree *slp_tree;

/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain def-stmts of this node statements operands.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<gimple *> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<gimple *> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts.  It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by vector size.  */
  unsigned int vec_stmts_size;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};

/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of SLP tree.  */
  slp_tree root;

  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorized this SLP instance.  */
  poly_uint64 unrolling_factor;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;

  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;
} *slp_instance;


/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)                (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)                     (S)->def_type

/* Describes two objects whose addresses must be unequal for the vectorized
   loop to be valid.  */
typedef std::pair<tree, tree> vec_object_pair;

/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
   UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
struct vec_lower_bound {
  vec_lower_bound () {}
  vec_lower_bound (tree e, bool u, poly_uint64 m)
    : expr (e), unsigned_p (u), min_value (m) {}

  tree expr;
  bool unsigned_p;
  poly_uint64 min_value;
};

/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum vec_kind { bb, loop };

  vec_info (vec_kind, void *);
  ~vec_info ();

  /* The type of vectorization.  */
  vec_kind kind;

  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;

  /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
  vec<data_reference_p> datarefs;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<gimple *> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;
};

struct _loop_vec_info;
struct _bb_vec_info;

/* Downcast helpers so is_a/as_a/dyn_cast work on vec_info pointers,
   discriminating on the KIND field.  */
template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}


/* In general, we can divide the vector statements in a vectorized loop
   into related groups ("rgroups") and say that for each rgroup there is
   some nS such that the rgroup operates on nS values from one scalar
   iteration followed by nS values from the next.  That is, if VF is the
   vectorization factor of the loop, the rgroup operates on a sequence:

     (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)

   where (i,j) represents a scalar value with index j in a scalar
   iteration with index i.

   [ We use the term "rgroup" to emphasise that this grouping isn't
     necessarily the same as the grouping of statements used elsewhere.
     For example, if we implement a group of scalar loads using gather
     loads, we'll use a separate gather load for each scalar load, and
     thus each gather load will belong to its own rgroup. ]

   In general this sequence will occupy nV vectors concatenated
   together.  If these vectors have nL lanes each, the total number of
   scalar values N is given by:

       N = nS * VF = nV * nL

   None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
   are compile-time constants but VF and nL can be variable (if the target
   supports variable-length vectors).

   In classical vectorization, each iteration of the vector loop would
   handle exactly VF iterations of the original scalar loop.  However,
   in a fully-masked loop, a particular iteration of the vector loop
   might handle fewer than VF iterations of the scalar loop.  The vector
   lanes that correspond to iterations of the scalar loop are said to be
   "active" and the other lanes are said to be "inactive".

   In a fully-masked loop, many rgroups need to be masked to ensure that
   they have no effect for the inactive lanes.  Each such rgroup needs a
   sequence of booleans in the same order as above, but with each (i,j)
   replaced by a boolean that indicates whether iteration i is active.
   This sequence occupies nV vector masks that again have nL lanes each.
   Thus the mask sequence as a whole consists of VF independent booleans
   that are each repeated nS times.

   We make the simplifying assumption that if a sequence of nV masks is
   suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
   VIEW_CONVERTing it.  This holds for all current targets that support
   fully-masked loops.  For example, suppose the scalar loop is:

     float *f;
     double *d;
     for (int i = 0; i < n; ++i)
       {
         f[i * 2 + 0] += 1.0f;
         f[i * 2 + 1] += 2.0f;
         d[i] += 3.0;
       }

   and suppose that vectors have 256 bits.  The vectorized f accesses
   will belong to one rgroup and the vectorized d access to another:

     f rgroup: nS = 2, nV = 1, nL = 8
     d rgroup: nS = 1, nV = 1, nL = 4
     VF = 4

     [ In this simple example the rgroups do correspond to the normal
       SLP grouping scheme. ]

   If only the first three lanes are active, the masks we need are:

     f rgroup: 1 1 | 1 1 | 1 1 | 0 0
     d rgroup:  1  |  1  |  1  |  0

   Here we can use a mask calculated for f's rgroup for d's, but not
   vice versa.

   Thus for each value of nV, it is enough to provide nV masks, with the
   mask being calculated based on the highest nL (or, equivalently, based
   on the highest nS) required by any rgroup with that nV.  We therefore
   represent the entire collection of masks as a two-level table, with the
   first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
   the second being indexed by the mask index 0 <= i < nV.  */

/* The masks needed by rgroups with nV vectors, according to the
   description above.  */
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;

  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;

  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};

typedef auto_vec<rgroup_masks> vec_loop_masks;

/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {
  _loop_vec_info (struct loop *);
  ~_loop_vec_info ();

  /* The loop to which this info struct refers to.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;
  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;

  /* Threshold of number of iterations below which vectorization will not be
     performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;

  /* Unrolling factor  */
  poly_uint64 vectorization_factor;

  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;

  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;

  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;

  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;

  /* Unknown DRs according to which loop was peeled.  */
  struct data_reference *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;

  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Check that the addresses of each pair of objects is unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;

  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;

  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;

  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  auto_vec<gimple *> may_misalign_stmts;

  /* Reduction cycles detected in the loop.  Used in loop-aware SLP.  */
  auto_vec<gimple *> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<gimple *> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;

  /* Map of IV base/step expressions to inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;

  /* The unrolling factor needed to SLP the loop.  In case of that pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;

  /* Is the loop vectorizable? */
  bool vectorizable;

  /* Records whether we still have the option of using a fully-masked loop.  */
  bool can_fully_mask_p;

  /* True if have decided to use a fully-masked loop.  */
  bool fully_masked_p;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
     #pragma omp simd
     for (int i = 0; i < m; i++)
       a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;

  /* Mark loops having masked stores.  */
  bool has_mask_store;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* For loops being epilogues of already vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;

} *loop_vec_info;

/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling retain total unchanged scalar loop iterations for
   cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L)     (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L)       (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L)      (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L)                (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L)     (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L)    (L)->mask_compare_type
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->ddrs
#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)          (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)       \
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)           \
  ((L)->comp_alias_ddrs.length () > 0                   \
   || (L)->check_unequal_addrs.length () > 0            \
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)          \
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING(L)                     \
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)           \
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)            \
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L)          \
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))

static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}

static inline bool
nested_in_vect_loop_p (struct loop *loop, gimple *stmt)
{
  return (loop->inner
          && (loop->inner == (gimple_bb (stmt))->loop_father));
}

typedef struct _bb_vec_info : public vec_info {
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator);
  ~_bb_vec_info ();

  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->datarefs
#define BB_VINFO_DDRS(B)             (B)->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data

static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}

/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};

/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction.  These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example).  We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,

  vect_used_in_scope
};

/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.
By saying that pure SLP is applied to the loop, we mean that we exploit only intra-iteration parallelism in the loop; i.e., the loop can be vectorized without doing any conceptual unrolling, cause we don't pack together stmts from different iterations, only within a single iteration. Loop hybrid SLP means that we exploit both intra-iteration and inter-iteration parallelism (e.g., number of elements in the vector is 4 and the slp-group-size is 2, in which case we don't have enough parallelism within an iteration, so we obtain the rest of the parallelism from subsequent iterations by unrolling the loop by 2). */ enum slp_vect_type { loop_vect = 0, pure_slp, hybrid }; /* Says whether a statement is a load, a store of a vectorized statement result, or a store of an invariant value. */ enum vec_load_store_type { VLS_LOAD, VLS_STORE, VLS_STORE_INVARIANT }; /* Describes how we're going to vectorize an individual load or store, or a group of loads or stores. */ enum vect_memory_access_type { /* An access to an invariant address. This is used only for loads. */ VMAT_INVARIANT, /* A simple contiguous access. */ VMAT_CONTIGUOUS, /* A contiguous access that goes down in memory rather than up, with no additional permutation. This is used only for stores of invariants. */ VMAT_CONTIGUOUS_DOWN, /* A simple contiguous access in which the elements need to be permuted after loading or before storing. Only used for loop vectorization; SLP uses separate permutes. */ VMAT_CONTIGUOUS_PERMUTE, /* A simple contiguous access in which the elements need to be reversed after loading or before storing. */ VMAT_CONTIGUOUS_REVERSE, /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */ VMAT_LOAD_STORE_LANES, /* An access in which each scalar element is loaded or stored individually. */ VMAT_ELEMENTWISE, /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped SLP accesses. 
Each unrolled iteration uses a contiguous load or store for the whole group, but the groups from separate iterations are combined in the same way as for VMAT_ELEMENTWISE. */ VMAT_STRIDED_SLP, /* The access uses gather loads or scatter stores. */ VMAT_GATHER_SCATTER }; typedef struct data_reference *dr_p; typedef struct _stmt_vec_info { enum stmt_vec_info_type type; /* Indicates whether this stmts is part of a computation whose result is used outside the loop. */ bool live; /* Stmt is part of some pattern (computation idiom) */ bool in_pattern_p; /* Is this statement vectorizable or should it be skipped in (partial) vectorization. */ bool vectorizable; /* The stmt to which this info struct refers to. */ gimple *stmt; /* The vec_info with respect to which STMT is vectorized. */ vec_info *vinfo; /* The vector type to be used for the LHS of this statement. */ tree vectype; /* The vectorized version of the stmt. */ gimple *vectorized_stmt; /* The following is relevant only for stmts that contain a non-scalar data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have at most one such data-ref. */ /* Information about the data-ref (access function, etc), relative to the inner-most containing loop. */ struct data_reference *data_ref_info; /* Information about the data-ref relative to this loop nest (the loop that is being considered for vectorization). */ innermost_loop_behavior dr_wrt_vec_loop; /* For loop PHI nodes, the base and evolution part of it. This makes sure this information is still available in vect_update_ivs_after_vectorizer where we may not be able to re-analyze the PHI nodes evolution as peeling for the prologue loop can make it unanalyzable. The evolution part is still correct after peeling, but the base may have changed from the version here. 
*/ tree loop_phi_evolution_base_unchanged; tree loop_phi_evolution_part; /* Used for various bookkeeping purposes, generally holding a pointer to some other stmt S that is in some way "related" to this stmt. Current use of this field is: If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is true): S is the "pattern stmt" that represents (and replaces) the sequence of stmts that constitutes the pattern. Similarly, the related_stmt of the "pattern stmt" points back to this stmt (which is the last stmt in the original sequence of stmts that constitutes the pattern). */ gimple *related_stmt; /* Used to keep a sequence of def stmts of a pattern stmt if such exists. */ gimple_seq pattern_def_seq; /* List of datarefs that are known to have the same alignment as the dataref of this stmt. */ vec<dr_p> same_align_refs; /* Selected SIMD clone's function info. First vector element is SIMD clone's function decl, followed by a pair of trees (base + step) for linear arguments (pair of NULLs for other arguments). */ vec<tree> simd_clone_info; /* Classify the def of this stmt. */ enum vect_def_type def_type; /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; /* Interleaving and reduction chains info. */ /* First element in the group. */ gimple *first_element; /* Pointer to the next element in the group. */ gimple *next_element; /* For data-refs, in case that two or more stmts share data-ref, this is the pointer to the previously detected stmt with the same dr. */ gimple *same_dr_stmt; /* The size of the group. */ unsigned int size; /* For stores, number of stores from this group seen. We vectorize the last one. */ unsigned int store_count; /* For loads only, the gap from the previous load. For consecutive loads, GAP is 1. */ unsigned int gap; /* The minimum negative dependence distance this stmt participates in or zero if none. */ unsigned int min_neg_dist; /* Not all stmts in the loop need to be vectorized. 
e.g, the increment of the loop induction variable and computation of array indexes. relevant indicates whether the stmt needs to be vectorized. */ enum vect_relevant relevant; /* For loads if this is a gather, for stores if this is a scatter. */ bool gather_scatter_p; /* True if this is an access with loop-invariant stride. */ bool strided_p; /* For both loads and stores. */ bool simd_lane_access_p; /* Classifies how the load or store is going to be implemented for loop vectorization. */ vect_memory_access_type memory_access_type; /* For reduction loops, this is the type of reduction. */ enum vect_reduction_type v_reduc_type; /* For CONST_COND_REDUCTION, record the reduc code. */ enum tree_code const_cond_reduc_code; /* On a reduction PHI the reduction type as detected by vect_force_simple_reduction. */ enum vect_reduction_type reduc_type; /* On a reduction PHI the def returned by vect_force_simple_reduction. On the def returned by vect_force_simple_reduction the corresponding PHI. */ gimple *reduc_def; /* The number of scalar stmt references from active SLP instances. */ unsigned int num_slp_uses; } *stmt_vec_info; /* Information about a gather/scatter call. */ struct gather_scatter_info { /* The internal function to use for the gather/scatter operation, or IFN_LAST if a built-in function should be used instead. */ internal_fn ifn; /* The FUNCTION_DECL for the built-in gather/scatter function, or null if an internal function should be used instead. */ tree decl; /* The loop-invariant base value. */ tree base; /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */ tree offset; /* Each offset element should be multiplied by this amount before being added to the base. */ int scale; /* The definition type for the vectorized offset. */ enum vect_def_type offset_dt; /* The type of the vectorized offset. */ tree offset_vectype; /* The type of the scalar elements after loading or before storing. 
*/ tree element_type; /* The type of the scalar elements being loaded or stored. */ tree memory_type; }; /* Access Functions. */ #define STMT_VINFO_TYPE(S) (S)->type #define STMT_VINFO_STMT(S) (S)->stmt inline loop_vec_info STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo) { if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo)) return loop_vinfo; return NULL; } inline bb_vec_info STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo) { if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo)) return bb_vinfo; return NULL; } #define STMT_VINFO_RELEVANT(S) (S)->relevant #define STMT_VINFO_LIVE_P(S) (S)->live #define STMT_VINFO_VECTYPE(S) (S)->vectype #define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable #define STMT_VINFO_DATA_REF(S) (S)->data_ref_info #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p #define STMT_VINFO_STRIDED_P(S) (S)->strided_p #define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p #define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \ (S)->dr_wrt_vec_loop.base_misalignment #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.offset_alignment #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.step_alignment #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq 
#define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs #define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info #define STMT_VINFO_DEF_TYPE(S) (S)->def_type #define STMT_VINFO_GROUP_FIRST_ELEMENT(S) (S)->first_element #define STMT_VINFO_GROUP_NEXT_ELEMENT(S) (S)->next_element #define STMT_VINFO_GROUP_SIZE(S) (S)->size #define STMT_VINFO_GROUP_STORE_COUNT(S) (S)->store_count #define STMT_VINFO_GROUP_GAP(S) (S)->gap #define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt #define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info) #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist #define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def #define GROUP_FIRST_ELEMENT(S) (S)->first_element #define GROUP_NEXT_ELEMENT(S) (S)->next_element #define GROUP_SIZE(S) (S)->size #define GROUP_STORE_COUNT(S) (S)->store_count #define GROUP_GAP(S) (S)->gap #define GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) #define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid) #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) #define STMT_SLP_TYPE(S) (S)->slp_type struct dataref_aux { /* The misalignment in bytes of the reference, or -1 if not known. */ int misalignment; /* The byte alignment that we'd ideally like the reference to have, and the value that misalignment is measured against. */ int target_alignment; /* If true the alignment of base_decl needs to be increased. */ bool base_misaligned; tree base_decl; }; #define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux) #define VECT_MAX_COST 1000 /* The maximum number of intermediate steps required in multi-step type conversion. 
*/ #define MAX_INTERM_CVT_STEPS 3 #define MAX_VECTORIZATION_FACTOR INT_MAX /* Nonzero if TYPE represents a (scalar) boolean type or type in the middle-end compatible with it (unsigned precision 1 integral types). Used to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P. */ #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || ((TREE_CODE (TYPE) == INTEGER_TYPE \ || TREE_CODE (TYPE) == ENUMERAL_TYPE) \ && TYPE_PRECISION (TYPE) == 1 \ && TYPE_UNSIGNED (TYPE))) extern vec<stmt_vec_info> stmt_vec_info_vec; void init_stmt_vec_info_vec (void); void free_stmt_vec_info_vec (void); /* Return a stmt_vec_info corresponding to STMT. */ static inline stmt_vec_info vinfo_for_stmt (gimple *stmt) { int uid = gimple_uid (stmt); if (uid <= 0) return NULL; return stmt_vec_info_vec[uid - 1]; } /* Set vectorizer information INFO for STMT. */ static inline void set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info) { unsigned int uid = gimple_uid (stmt); if (uid == 0) { gcc_checking_assert (info); uid = stmt_vec_info_vec.length () + 1; gimple_set_uid (stmt, uid); stmt_vec_info_vec.safe_push (info); } else { gcc_checking_assert (info == NULL); stmt_vec_info_vec[uid - 1] = info; } } /* Return the earlier statement between STMT1 and STMT2. */ static inline gimple * get_earlier_stmt (gimple *stmt1, gimple *stmt2) { unsigned int uid1, uid2; if (stmt1 == NULL) return stmt2; if (stmt2 == NULL) return stmt1; uid1 = gimple_uid (stmt1); uid2 = gimple_uid (stmt2); if (uid1 == 0 || uid2 == 0) return NULL; gcc_checking_assert (uid1 <= stmt_vec_info_vec.length () && uid2 <= stmt_vec_info_vec.length ()); if (uid1 < uid2) return stmt1; else return stmt2; } /* Return the later statement between STMT1 and STMT2. 
*/ static inline gimple * get_later_stmt (gimple *stmt1, gimple *stmt2) { unsigned int uid1, uid2; if (stmt1 == NULL) return stmt2; if (stmt2 == NULL) return stmt1; uid1 = gimple_uid (stmt1); uid2 = gimple_uid (stmt2); if (uid1 == 0 || uid2 == 0) return NULL; gcc_assert (uid1 <= stmt_vec_info_vec.length ()); gcc_assert (uid2 <= stmt_vec_info_vec.length ()); if (uid1 > uid2) return stmt1; else return stmt2; } /* Return TRUE if a statement represented by STMT_INFO is a part of a pattern. */ static inline bool is_pattern_stmt_p (stmt_vec_info stmt_info) { gimple *related_stmt; stmt_vec_info related_stmt_info; related_stmt = STMT_VINFO_RELATED_STMT (stmt_info); if (related_stmt && (related_stmt_info = vinfo_for_stmt (related_stmt)) && STMT_VINFO_IN_PATTERN_P (related_stmt_info)) return true; return false; } /* Return true if BB is a loop header. */ static inline bool is_loop_header_bb_p (basic_block bb) { if (bb == (bb->loop_father)->header) return true; gcc_checking_assert (EDGE_COUNT (bb->preds) == 1); return false; } /* Return pow2 (X). */ static inline int vect_pow2 (int x) { int i, res = 1; for (i = 0; i < x; i++) res *= 2; return res; } /* Alias targetm.vectorize.builtin_vectorization_cost. */ static inline int builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign) { return targetm.vectorize.builtin_vectorization_cost (type_of_cost, vectype, misalign); } /* Get cost by calling cost target builtin. */ static inline int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost) { return builtin_vectorization_cost (type_of_cost, NULL, 0); } /* Alias targetm.vectorize.init_cost. */ static inline void * init_cost (struct loop *loop_info) { return targetm.vectorize.init_cost (loop_info); } /* Alias targetm.vectorize.add_stmt_cost. 
*/ static inline unsigned add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { return targetm.vectorize.add_stmt_cost (data, count, kind, stmt_info, misalign, where); } /* Alias targetm.vectorize.finish_cost. */ static inline void finish_cost (void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost); } /* Alias targetm.vectorize.destroy_cost_data. */ static inline void destroy_cost_data (void *data) { targetm.vectorize.destroy_cost_data (data); } /*-----------------------------------------------------------------*/ /* Info on data references alignment. */ /*-----------------------------------------------------------------*/ inline void set_dr_misalignment (struct data_reference *dr, int val) { dataref_aux *data_aux = DR_VECT_AUX (dr); if (!data_aux) { data_aux = XCNEW (dataref_aux); dr->aux = data_aux; } data_aux->misalignment = val; } inline int dr_misalignment (struct data_reference *dr) { return DR_VECT_AUX (dr)->misalignment; } /* Reflects actual alignment of first access in the vectorized loop, taking into account peeling/versioning if applied. */ #define DR_MISALIGNMENT(DR) dr_misalignment (DR) #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) #define DR_MISALIGNMENT_UNKNOWN (-1) /* Only defined once DR_MISALIGNMENT is defined. */ #define DR_TARGET_ALIGNMENT(DR) DR_VECT_AUX (DR)->target_alignment /* Return true if data access DR is aligned to its target alignment (which may be less than a full vector). */ static inline bool aligned_access_p (struct data_reference *data_ref_info) { return (DR_MISALIGNMENT (data_ref_info) == 0); } /* Return TRUE if the alignment of the data access is known, and FALSE otherwise. 
*/ static inline bool known_alignment_for_access_p (struct data_reference *data_ref_info) { return (DR_MISALIGNMENT (data_ref_info) != DR_MISALIGNMENT_UNKNOWN); } /* Return the minimum alignment in bytes that the vectorized version of DR is guaranteed to have. */ static inline unsigned int vect_known_alignment_in_bytes (struct data_reference *dr) { if (DR_MISALIGNMENT (dr) == DR_MISALIGNMENT_UNKNOWN) return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr))); if (DR_MISALIGNMENT (dr) == 0) return DR_TARGET_ALIGNMENT (dr); return DR_MISALIGNMENT (dr) & -DR_MISALIGNMENT (dr); } /* Return the behavior of DR with respect to the vectorization context (which for outer loop vectorization might not be the behavior recorded in DR itself). */ static inline innermost_loop_behavior * vect_dr_behavior (data_reference *dr) { gimple *stmt = DR_STMT (dr); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); if (loop_vinfo == NULL || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt)) return &DR_INNERMOST (dr); else return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model (loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* Return true if the loop described by LOOP_VINFO is fully-masked and if the first iteration should use a partial mask in order to achieve alignment. */ static inline bool vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo) { return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)); } /* Return the number of vectors of type VECTYPE that are needed to get NUNITS elements. NUNITS should be based on the vectorization factor, so it is always a known multiple of the number of elements in VECTYPE. 
*/ static inline unsigned int vect_get_num_vectors (poly_uint64 nunits, tree vectype) { return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant (); } /* Return the number of copies needed for loop vectorization when a statement operates on vectors of type VECTYPE. This is the vectorization factor divided by the number of elements in VECTYPE and is always known at compile time. */ static inline unsigned int vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype) { return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype); } /* Update maximum unit count *MAX_NUNITS so that it accounts for the number of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet recorded any vector types. */ static inline void vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype) { /* All unit counts have the form current_vector_size * X for some rational X, so two unit sizes must have a common multiple. Everything is a multiple of the initial value of 1. */ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); *max_nunits = force_common_multiple (*max_nunits, nunits); } /* Return the vectorization factor that should be used for costing purposes while vectorizing the loop described by LOOP_VINFO. Pick a reasonable estimate if the vectorization factor isn't known at compile time. */ static inline unsigned int vect_vf_for_cost (loop_vec_info loop_vinfo) { return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo)); } /* Estimate the number of elements in VEC_TYPE for costing purposes. Pick a reasonable estimate if the exact number isn't known at compile time. */ static inline unsigned int vect_nunits_for_cost (tree vec_type) { return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type)); } /* Return the maximum possible vectorization factor for LOOP_VINFO. 
*/ static inline unsigned HOST_WIDE_INT vect_max_vf (loop_vec_info loop_vinfo) { unsigned HOST_WIDE_INT vf; if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf)) return vf; return MAX_VECTORIZATION_FACTOR; } /* Return the size of the value accessed by unvectorized data reference DR. This is only valid once STMT_VINFO_VECTYPE has been calculated for the associated gimple statement, since that guarantees that DR accesses either a scalar or a scalar equivalent. ("Scalar equivalent" here includes things like V1SI, which can be vectorized in the same way as a plain SI.) */ inline unsigned int vect_get_scalar_dr_size (struct data_reference *dr) { return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)))); } /* Source location */ extern source_location vect_location; /*-----------------------------------------------------------------*/ /* Function prototypes. */ /*-----------------------------------------------------------------*/ /* Simple loop peeling and versioning utilities for vectorizer's purposes - in tree-vect-loop-manip.c. */ extern void vect_set_loop_condition (struct loop *, loop_vec_info, tree, tree, tree, bool); extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge); struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, struct loop *, edge); extern void vect_loop_versioning (loop_vec_info, unsigned int, bool, poly_uint64); extern struct loop *vect_do_peeling (loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool); extern void vect_prepare_for_masked_peels (loop_vec_info); extern source_location find_loop_location (struct loop *); extern bool vect_can_advance_ivs_p (loop_vec_info); /* In tree-vect-stmts.c. 
*/ extern poly_uint64 current_vector_size; extern tree get_vectype_for_scalar_type (tree); extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64); extern tree get_mask_type_for_scalar_type (tree); extern tree get_same_sized_vectype (tree, tree); extern bool vect_get_loop_mask_type (loop_vec_info); extern bool vect_is_simple_use (tree, vec_info *, gimple **, enum vect_def_type *); extern bool vect_is_simple_use (tree, vec_info *, gimple **, enum vect_def_type *, tree *); extern bool supportable_widening_operation (enum tree_code, gimple *, tree, tree, enum tree_code *, enum tree_code *, int *, vec<tree> *); extern bool supportable_narrowing_operation (enum tree_code, tree, tree, enum tree_code *, int *, vec<tree> *); extern stmt_vec_info new_stmt_vec_info (gimple *stmt, vec_info *); extern void free_stmt_vec_info (gimple *stmt); extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *, int, stmt_vector_for_cost *, stmt_vector_for_cost *); extern void vect_model_store_cost (stmt_vec_info, int, vect_memory_access_type, vec_load_store_type, slp_tree, stmt_vector_for_cost *, stmt_vector_for_cost *); extern void vect_model_load_cost (stmt_vec_info, int, vect_memory_access_type, slp_tree, stmt_vector_for_cost *, stmt_vector_for_cost *); extern unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); extern void vect_finish_replace_stmt (gimple *, gimple *); extern void vect_finish_stmt_generation (gimple *, gimple *, gimple_stmt_iterator *); extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info); extern tree vect_get_store_rhs (gimple *); extern tree vect_get_vec_def_for_operand_1 (gimple *, enum vect_def_type); extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL); extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *, vec<tree> *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy (enum vect_def_type *, vec<tree> *, 
vec<tree> *); extern tree vect_init_vector (gimple *, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree); extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *, bool *, slp_tree, slp_instance); extern void vect_remove_stores (gimple *); extern bool vect_analyze_stmt (gimple *, bool *, slp_tree, slp_instance); extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *, gimple **, tree, int, slp_tree); extern void vect_get_load_cost (struct data_reference *, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); extern void vect_get_store_cost (struct data_reference *, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift (enum tree_code, tree); extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &); extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &); extern void optimize_mask_stores (struct loop*); extern gcall *vect_gen_while (tree, tree, tree); extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree); /* In tree-vect-data-refs.c. 
*/ extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int); extern enum dr_alignment_support vect_supportable_dr_alignment (struct data_reference *, bool); extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *, HOST_WIDE_INT *); extern bool vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence (slp_instance); extern bool vect_enhance_data_refs_alignment (loop_vec_info); extern bool vect_analyze_data_refs_alignment (loop_vec_info); extern bool vect_verify_datarefs_alignment (loop_vec_info); extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance); extern bool vect_analyze_data_ref_accesses (vec_info *); extern bool vect_prune_runtime_alias_test_list (loop_vec_info); extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); extern bool vect_check_gather_scatter (gimple *, loop_vec_info, gather_scatter_info *); extern bool vect_analyze_data_refs (vec_info *, poly_uint64 *); extern void vect_record_base_alignments (vec_info *); extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, bool *, tree = NULL_TREE, tree = NULL_TREE); extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *, tree); extern void vect_copy_ref_info (tree, tree); extern tree vect_create_destination_var (tree, tree); extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool); extern void vect_permute_store_chain (vec<tree> ,unsigned int, gimple *, gimple_stmt_iterator *, vec<tree> *); extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, struct loop **); 
extern void vect_transform_grouped_load (gimple *, vec<tree> , int, gimple_stmt_iterator *); extern void vect_record_grouped_load_vectors (gimple *, vec<tree> ); extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name (tree, enum vect_var_kind, const char * = NULL); extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ /* FORNOW: Used in tree-parloops.c. */ extern gimple *vect_force_simple_reduction (loop_vec_info, gimple *, bool *, bool); /* Used in gimple-loop-interchange.c. */ extern bool check_reduction_path (location_t, loop_p, gphi *, tree, enum tree_code); /* Drive for loop analysis stage. */ extern loop_vec_info vect_analyze_loop (struct loop *, loop_vec_info); extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL); extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *, tree *, bool); extern tree vect_halve_mask_nunits (tree); extern tree vect_double_mask_nunits (tree); extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *, unsigned int, tree); extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); /* Drive for loop transformation stage. 
*/ extern struct loop *vect_transform_loop (loop_vec_info); extern loop_vec_info vect_analyze_loop_form (struct loop *); extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *, slp_tree, int, gimple **); extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *, gimple **, slp_tree, slp_instance); extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *, gimple **, slp_tree); extern tree get_initial_def_for_reduction (gimple *, tree, tree *); extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code); extern int vect_get_known_peeling_cost (loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *); extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance (slp_instance); extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> , gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations (vec_info *); extern bool vect_schedule_slp (vec_info *); extern bool vect_analyze_slp (vec_info *, unsigned); extern bool vect_make_slp_decision (loop_vec_info); extern void vect_detect_hybrid_slp (loop_vec_info); extern void vect_get_slp_defs (vec<tree> , slp_tree, vec<vec<tree> > *); extern bool vect_slp_bb (basic_block); extern gimple *vect_find_last_scalar_stmt_in_slp (slp_tree); extern bool is_simple_and_all_uses_invariant (gimple *, loop_vec_info); extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode, unsigned int * = NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>, unsigned int, vec<tree> &); extern int vect_get_place_in_interleaving_chain (gimple *, gimple *); /* In tree-vect-patterns.c. */ /* Pattern recognition functions. Additional pattern recognition functions can (and will) be added in the future. 
*/ typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *); #define NUM_PATTERNS 15 void vect_pattern_recog (vec_info *); /* In tree-vectorizer.c. */ unsigned vectorize_loops (void); bool vect_stmt_in_region_p (vec_info *, gimple *); void vect_free_loop_info_assumptions (struct loop *); #endif /* GCC_TREE_VECTORIZER_H */
fwdTimeCpmlFor2dAw_openmp_mex.c
/* ======================================================================
 *
 * fwdTimeCpmlFor2dAw_mex.c
 *
 * Simulates 2-d acoustic wave forward propagation using finite difference
 * in time domain with partial differential equation (PDE)
 *
 * This C source file is free for use in academic research.
 * All rights reserved.
 *
 *
 * Written by Lingchen Zhu (zhulingchen@gmail.com)
 * Center for Signal and Information Processing, Center for Energy & Geo Processing
 * Georgia Institute of Technology
 *
 * ====================================================================== */
#include "mex.h"
#include <omp.h>
#include "finiteDifference.h"
#include <math.h>
#include <string.h>

/* input arguments */
#define VM_IN        prhs[0]   /* velocity model, nz-by-nx (column-major) */
#define SOURCE_IN    prhs[1]   /* source wavefield, nz-by-nx-by-nt */
#define DIFFORDER_IN prhs[2]   /* finite-difference half order */
#define BOUNDARY_IN  prhs[3]   /* absorbing-boundary thickness in grid points */
#define DZ_IN        prhs[4]   /* grid spacing in z */
#define DX_IN        prhs[5]   /* grid spacing in x */
#define DT_IN        prhs[6]   /* time step */
/*#define TEST_IN prhs[7]*/    /* in argument for test */

/* output arguments */
#define DATA_OUT     plhs[0]   /* recorded surface data, nx-by-nt */
#define SNAPSHOT_OUT plhs[1]   /* wavefield snapshots, nz-by-nx-by-nt */
/*#define TEST_OUT plhs[2]*/   /* out argument for test */

/* the gateway routine
 *
 * Forward-propagates a 2-D acoustic wavefield through the velocity model
 * with a CPML-style absorbing boundary (the zPhi/xPhi and zPsi/xPsi arrays
 * are the boundary memory variables -- presumably following the standard
 * convolutional-PML split; TODO confirm against the MATLAB reference
 * implementation this mirrors).  All 2-D arrays below are column-major
 * (MATLAB convention): element (i, j) of an m-row array p is p[j * m + i]. */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    /* begin of declaration */
    double *pVelocityModel, *pSource, *pData, *pSnapshot;
    double dz, dx, dt;
    int diffOrder, boundary;
    /* test */
    /*double *pTestIn;*/
    int l, i, j, t;
    mwSize nz, nx, nt;
    const mwSize *pDimsSource;
    mwSize pDimsSnapshot[3] = {0};
    double *pCoeff, *pOldFdm, *pCurFdm, *pNewFdm;
    double *puDampLeft, *pvDampLeft, *puDampRight, *pvDampRight, *puDampDown, *pvDampDown;
    double *pxDampLeft, *pxDampRight, *pxDamp, *pxb, *pzDampDown, *pzDamp, *pzb;
    double *pVdtSq;
    double *pzPhi, *pxPhi, *pzA, *pxA, *pzPsi, *pxPsi, *pzP, *pxP;
    double *pCurFdm_diffIn_zPhi, *pCurFdm_diffOut_zPhi, *pCurFdm_diffIn_xPhi, *pCurFdm_diffOut_xPhi;
    double *pCurFdm_diffIn_zA, *pCurFdm_diffOut_zA, *pCurFdm_diffIn_xA, *pCurFdm_diffOut_xA;
    double *pzA_diffIn, *pzA_diffOut, *pxA_diffIn, *pxA_diffOut;
    /* end of declaration */

    /* test begin */
    /*pTestIn = mxGetPr(TEST_IN);*/
    /* test end */

    if (nrhs < 7)
        mexErrMsgTxt("All 7 input arguments shall be provided!");

    /* ATTENTION: mxGetPr might just produce a 1D array that is linearized
       according to Matlab convention (column order) */
    pVelocityModel = mxGetPr(VM_IN);
    pSource = mxGetPr(SOURCE_IN);
    diffOrder = *mxGetPr(DIFFORDER_IN);
    boundary = *mxGetPr(BOUNDARY_IN);
    dz = *mxGetPr(DZ_IN);
    dx = *mxGetPr(DX_IN);
    dt = *mxGetPr(DT_IN);

    /* problem dimensions come from the 3-D source array */
    pDimsSource = mxGetDimensions(SOURCE_IN);
    nz = pDimsSource[0];
    nx = pDimsSource[1];
    nt = pDimsSource[2];
    mxAssert(nz == mxGetM(VM_IN), "Velocity model and source grids should have the same z-axis grids!");
    mxAssert(nx == mxGetN(VM_IN), "Velocity model and source grids should have the same x-axis grids!");

    /* initialize storage */
    DATA_OUT = mxCreateDoubleMatrix(nx, nt, mxREAL);
    pData = mxGetPr(DATA_OUT);
    pDimsSnapshot[0] = nz;
    pDimsSnapshot[1] = nx;
    pDimsSnapshot[2] = nt;
    SNAPSHOT_OUT = mxCreateNumericArray(3, pDimsSnapshot, mxDOUBLE_CLASS, mxREAL);
    pSnapshot = mxGetPr(SNAPSHOT_OUT);

    /* staggered-grid FD coefficients ("s"); l is the stencil halo width */
    pCoeff = dCoef(diffOrder, "s");
    l = 2 * diffOrder - 1;

    /* damp profile of x-axis */
    /* left edge: distance into the boundary decreases toward the interior */
    puDampLeft = (double*)mxCalloc(nz * boundary, sizeof(double));
    for (j = 0; j < boundary; j++)
        for (i = 0; i < nz; i++)
            puDampLeft[j * nz + i] = (boundary - j) * dx;
    pvDampLeft = (double*)mxCalloc(nz * boundary, sizeof(double));
    memcpy(pvDampLeft, pVelocityModel, sizeof(double) * nz * boundary);
    pxDampLeft = dampPml(puDampLeft, pvDampLeft, nz, boundary, boundary * dx);

    /* right edge: distance grows away from the interior */
    puDampRight = (double*)mxCalloc(nz * boundary, sizeof(double));
    for (j = 0; j < boundary; j++)
        for (i = 0; i < nz; i++)
            puDampRight[j * nz + i] = (j + 1) * dx;
    pvDampRight = (double*)mxCalloc(nz * boundary, sizeof(double));
    memcpy(pvDampRight, pVelocityModel + (nx-boundary) * nz, sizeof(double) * nz * boundary);
    pxDampRight = dampPml(puDampRight, pvDampRight, nz, boundary, boundary * dx);

    /* assemble full-width x damping; interior columns stay zero (calloc) */
    pxDamp = (double*)mxCalloc(nz * nx, sizeof(double));
    memcpy(pxDamp, pxDampLeft, sizeof(double) * nz * boundary);
    memcpy(pxDamp + (nx-boundary) * nz, pxDampRight, sizeof(double) * nz * boundary);

    /* per-cell x attenuation factor; exp(0) = 1 in the interior */
    pxb = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = 0; i < nz; i++)
            pxb[j * nz + i] = exp(-pxDamp[j * nz + i] * dt);

    /* damp profile of z-axis (bottom edge only; free surface at the top) */
    puDampDown = (double*)mxCalloc(boundary * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for(i = 0; i < boundary; i++)
            puDampDown[j * boundary + i] = (i + 1) * dz;
    pvDampDown = (double*)mxCalloc(boundary * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for(i = 0; i < boundary; i++)
            pvDampDown[j * boundary + i] = pVelocityModel[j * nz + (nz - boundary + i)];
    pzDampDown = dampPml(puDampDown, pvDampDown, boundary, nx, boundary * dz);

    pzDamp = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = nz-boundary; i < nz; i++)
            pzDamp[j * nz + i] = pzDampDown[j * boundary + i-(nz-boundary)];

    pzb = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = 0; i < nz; i++)
            pzb[j * nz + i] = exp(-pzDamp[j * nz + i] * dt);

    /* ======================================================================
     * 2-D Acoustic Wave Forward-Time Modeling
     * ====================================================================== */
    /* additional arrays for storage intermediate results */
    /* fdm(:, :, 1) - oldFdm; fdm(:, :, 2) - curFdm; fdm(:, :, 3) - newFdm;
       each padded by the stencil halo l on all four sides */
    pOldFdm = (double*)mxCalloc((nz+2*l) * (nx+2*l), sizeof(double));
    pCurFdm = (double*)mxCalloc((nz+2*l) * (nx+2*l), sizeof(double));
    pNewFdm = (double*)mxCalloc((nz+2*l) * (nx+2*l), sizeof(double));

    /* PML memory variables (phi/psi) and auxiliary fields (A/P) for each axis */
    pzPhi = (double*)mxCalloc((nz+2*l) * nx, sizeof(double));
    pxPhi = (double*)mxCalloc(nz * (nx+2*l), sizeof(double));
    pzA = (double*)mxCalloc((nz+2*l) * nx, sizeof(double));
    pxA = (double*)mxCalloc(nz * (nx+2*l), sizeof(double));
    pzPsi = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pxPsi = (double*)mxCalloc(nz * (nx+l), sizeof(double));
    pzP = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pxP = (double*)mxCalloc(nz * (nx+l), sizeof(double));

    /* (v*dt)^2, precomputed once per cell */
    pVdtSq = (double*)mxCalloc(nz * nx, sizeof(double));
    for (j = 0; j < nx; j++)
        for (i = 0; i < nz; i++)
            pVdtSq[j * nz + i] = (pVelocityModel[j * nz + i] * dt) * (pVelocityModel[j * nz + i] * dt);

    /* scratch inputs for diffOperator2d (reused every time step) */
    pCurFdm_diffIn_zPhi = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pCurFdm_diffIn_xPhi = (double*)mxCalloc(nz * (nx+l), sizeof(double));
    pCurFdm_diffIn_zA = (double*)mxCalloc((nz+2*l) * nx, sizeof(double));
    pCurFdm_diffIn_xA = (double*)mxCalloc(nz * (nx+2*l), sizeof(double));
    pzA_diffIn = (double*)mxCalloc((nz+l) * nx, sizeof(double));
    pxA_diffIn = (double*)mxCalloc(nz * (nx+l), sizeof(double));

    /*
     * Index ranges used below (MATLAB notation, 0-based here):
     * izi = l:(nz+l-1);                 len: nz    (interior rows)
     * ixi = l:(nx+l-1);                 len: nx    (interior columns)
     * izl = (diffOrder-1):(nz+2*l-diffOrder-1);    len: nz+l
     * ixl = (diffOrder-1):(nx+2*l-diffOrder-1);    len: nx+l
     */
    for (t = 0; t < nt; t++)
    {
        /* zPhi(izi, :) = zb .* zPhi(izi, :) + (zb - 1) .* diffOperator(fdm(izl+1, ixi, 2), coeff, dz, 1); */
        for (j = l; j < nx+l; j++)
            for (i = diffOrder; i < nz+2*l-diffOrder+1; i++)
                pCurFdm_diffIn_zPhi[(j - l) * (nz+l) + (i-diffOrder)] = pCurFdm[j * (nz+2*l) + i];
        pCurFdm_diffOut_zPhi = diffOperator2d(pCurFdm_diffIn_zPhi, nz+l, nx, pCoeff, diffOrder, dz, 1);
        for (j = 0; j < nx; j++)
            for (i = l; i < nz + l; i++)
                pzPhi[j * (nz+2*l) + i] = pzb[j * nz + (i - l)] * pzPhi[j * (nz+2*l) + i] + (pzb[j * nz + (i - l)] - 1) * pCurFdm_diffOut_zPhi[j * nz + (i - l)];

        /* xPhi(:, ixi) = xb .* xPhi(:, ixi) + (xb - 1) .* diffOperator(fdm(izi, ixl+1, 2), coeff, dx, 2); */
        for (j = diffOrder; j < nx+2*l-diffOrder+1; j++)
            for (i = l; i < nz+l; i++)
                pCurFdm_diffIn_xPhi[(j-diffOrder) * nz + (i - l)] = pCurFdm[j * (nz+2*l) + i];
        pCurFdm_diffOut_xPhi = diffOperator2d(pCurFdm_diffIn_xPhi, nz, nx+l, pCoeff, diffOrder, dx, 2);
        for (j = l; j < nx + l; j++)
            for (i = 0; i < nz; i++)
                pxPhi[j * nz + i] = pxb[(j - l) * nz + i] * pxPhi[j * nz + i] + (pxb[(j - l) * nz + i] - 1) * pCurFdm_diffOut_xPhi[(j - l) * nz + i];

        /* zA(izl, :) = diffOperator(fdm(:, ixi, 2), coeff, dz, 1) + zPhi(izl, :); */
        /* the interior nx columns of pCurFdm are contiguous, so one memcpy suffices */
        memcpy(pCurFdm_diffIn_zA, pCurFdm + l * (nz+2*l), sizeof(double) * nx * (nz+2*l));
        pCurFdm_diffOut_zA = diffOperator2d(pCurFdm_diffIn_zA, nz+2*l, nx, pCoeff, diffOrder, dz, 1);
        for (j = 0; j < nx; j++)
            for (i = diffOrder - 1; i < nz+2*l-diffOrder; i++)
                pzA[j * (nz+2*l) + i] = pCurFdm_diffOut_zA[j * (nz+l) + (i - (diffOrder - 1))] + pzPhi[j * (nz+2*l) + i];

        /* xA(:, ixl) = diffOperator(fdm(izi, :, 2), coeff, dx, 2) + xPhi(:, ixl); */
        for (j = 0; j < nx+2*l; j++)
            for (i = l; i < nz+l; i++)
                pCurFdm_diffIn_xA[j * nz + (i - l)] = pCurFdm[j * (nz+2*l) + i];
        pCurFdm_diffOut_xA = diffOperator2d(pCurFdm_diffIn_xA, nz, nx+2*l, pCoeff, diffOrder, dx, 2);
        for (j = diffOrder - 1; j < nx+2*l-diffOrder; j++)
            for (i = 0; i < nz; i++)
                pxA[j * nz + i] = pCurFdm_diffOut_xA[(j - (diffOrder - 1)) * nz + i] + pxPhi[j * nz + i];

        /* zPsi(izi, :) = zb .* zPsi(izi, :) + (zb - 1) .* diffOperator(zA(izl, :), coeff, dz, 1); */
        for (j = 0; j < nx; j++)
            for (i = diffOrder - 1; i < nz+2*l-diffOrder; i++)
                pzA_diffIn[j * (nz+l) + (i - (diffOrder - 1))] = pzA[j * (nz+2*l) + i];
        pzA_diffOut = diffOperator2d(pzA_diffIn, nz+l, nx, pCoeff, diffOrder, dz, 1);
        for (j = 0; j < nx; j++)
            for (i = l; i < nz + l; i++)
                pzPsi[j * (nz+l) + i] = pzb[j * nz + (i - l)] * pzPsi[j * (nz+l) + i] + (pzb[j * nz + (i - l)] - 1) * pzA_diffOut[j * nz + (i - l)];

        /* xPsi(:, ixi) = xb .* xPsi(:, ixi) + (xb - 1) .* diffOperator(xA(:, ixl), coeff, dx, 2); */
        memcpy(pxA_diffIn, pxA + (diffOrder - 1) * nz, sizeof(double) * (nx+l) * nz);
        pxA_diffOut = diffOperator2d(pxA_diffIn, nz, nx+l, pCoeff, diffOrder, dx, 2);
        for (j = l; j < nx + l; j++)
            for (i = 0; i < nz; i++)
                pxPsi[j * nz + i] = pxb[(j - l) * nz + i] * pxPsi[j * nz + i] + (pxb[(j - l) * nz + i] - 1) * pxA_diffOut[(j - l) * nz + i];

        /* zP(izi, :) = diffOperator(zA(izl, :), coeff, dz, 1) + zPsi(izi, :); */
        for (j = 0; j < nx; j++)
            for (i = l; i < nz + l; i++)
                pzP[j * (nz+l) + i] = pzA_diffOut[j * nz + (i - l)] + pzPsi[j * (nz+l) + i];

        /* xP(:, ixi) = diffOperator(xA(:, ixl), coeff, dx, 2) + xPsi(:, ixi); */
        for (j = l; j < nx + l; j++)
            for (i = 0; i < nz; i++)
                pxP[j * nz + i] = pxA_diffOut[(j - l) * nz + i] + pxPsi[j * nz + i];

        /* ======================================================================
         * One-step finite difference calculation
         * ====================================================================== */
        /* fdm(izi, ixi, 3) = vdtSq .* (zP(izi, :) + xP(:, ixi) + source(:, :, it)) + 2 * fdm(izi, ixi, 2) - fdm(izi, ixi, 1); */
        /* each (i, j) cell is independent, so the update parallelizes cleanly */
        #pragma omp parallel private(j, i)
        {
        #pragma omp for schedule(static, 8)
        for (j = l; j < nx + l; j++)
            for (i = l; i < nz + l; i++)
                pNewFdm[j * (nz+2*l) + i] = pVdtSq[(j - l) * nz + (i - l)] *
                    ( pzP[(j - l) * (nz+l) + i] + pxP[j * nz + (i - l)] + pSource[t * (nz * nx) + (j - l) * nz + (i - l)] )
                    + 2 * pCurFdm[j * (nz+2*l) + i] - pOldFdm[j * (nz+2*l) + i];
        }

        /* update finite difference matrices */
        /* fdm(:, :, 1) = fdm(:, :, 2); */
        memcpy(pOldFdm, pCurFdm, sizeof(double) * (nz+2*l) * (nx+2*l));
        /* fdm(:, :, 2) = fdm(:, :, 3); */
        memcpy(pCurFdm, pNewFdm, sizeof(double) * (nz+2*l) * (nx+2*l));

        /* update data */
        /* data(:, it) = fdm(l, ixi, 2); -- first interior row (receiver line) */
        for (i = 0; i < nx; i++)
            pData[t * nx + i] = pCurFdm[(i + l) * (nz+2*l) + l];

        /* update snapshot */
        /* snapshot(:, :, it) = fdm(izi, ixi, 2); */
        for (j = 0; j < nx; j++)
            for (i = 0; i < nz; i++)
                pSnapshot[t * (nz * nx) + j * nz + i] = pCurFdm[(j + l) * (nz+2*l) + (i + l)];

        /* ATTENTION: Don't forget to free dynamic memory allocated by mxCalloc
           function (except for output arrays), otherwise memory leak will occur.
           diffOperator2d allocates a fresh output each call, so these must be
           released every time step. */
        mxFree(pCurFdm_diffOut_zPhi);
        mxFree(pCurFdm_diffOut_xPhi);
        mxFree(pCurFdm_diffOut_zA);
        mxFree(pCurFdm_diffOut_xA);
        mxFree(pzA_diffOut);
        mxFree(pxA_diffOut);
    }

    /* test begin */
    /*TEST_OUT = curFdm;*/
    /* test end */

    /* ATTENTION: Don't forget to free dynamic memory allocated by mxCalloc
       function (except for output arrays), otherwise memory leak will occur */
    mxFree(pCoeff);
    mxFree(pOldFdm);
    mxFree(pCurFdm);
    mxFree(pNewFdm);
    mxFree(puDampLeft);
    mxFree(pvDampLeft);
    mxFree(puDampRight);
    mxFree(pvDampRight);
    mxFree(puDampDown);
    mxFree(pvDampDown);
    mxFree(pxDampLeft);
    mxFree(pxDampRight);
    mxFree(pxDamp);
    mxFree(pxb);
    mxFree(pzDampDown);
    mxFree(pzDamp);
    mxFree(pzb);
    mxFree(pVdtSq);
    mxFree(pzPhi);
    mxFree(pxPhi);
    mxFree(pzA);
    mxFree(pxA);
    mxFree(pzPsi);
    mxFree(pxPsi);
    mxFree(pzP);
    mxFree(pxP);
    mxFree(pCurFdm_diffIn_zPhi);
    mxFree(pCurFdm_diffIn_xPhi);
    mxFree(pCurFdm_diffIn_zA);
    mxFree(pCurFdm_diffIn_xA);
    mxFree(pzA_diffIn);
    mxFree(pxA_diffIn);
}
core_slascl.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlascl.c, normal z -> s, Fri Sep 28 17:38:21 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

/******************************************************************************/
/**
 * Scales a general m-by-n matrix A by cto/cfrom, computed without
 * over/underflow, by calling the Fortran LAPACK routine slascl directly.
 *
 * @param[in]     uplo  Matrix shape selector, translated to a LAPACK TYPE char.
 * @param[in]     cfrom Denominator of the scaling factor; must be nonzero.
 * @param[in]     cto   Numerator of the scaling factor.
 * @param[in]     m     Number of rows of A.
 * @param[in]     n     Number of columns of A.
 * @param[in,out] A     Pointer to the matrix, leading dimension lda.
 * @param[in]     lda   Leading dimension of A. lda >= max(1,m).
 */
__attribute__((weak))
void plasma_core_slascl(plasma_enum_t uplo,
                        float cfrom, float cto,
                        int m, int n,
                        float *A, int lda)
{
    // LAPACKE_slascl is not available in LAPACKE < 3.6.0,
    // so the Fortran interface is called directly.
    // kl/ku are referenced by slascl only for banded TYPEs, but they must
    // not be passed as indeterminate values: initialize them (bug fix for
    // previously-uninitialized locals passed to the Fortran routine).
    int kl = 0;
    int ku = 0;
    int info = 0;
    char type = lapack_const(uplo);
    LAPACK_slascl(&type,
                  &kl, &ku,
                  &cfrom, &cto,
                  &m, &n,
                  A, &lda, &info);
}

/******************************************************************************/
/**
 * OpenMP-task wrapper around plasma_core_slascl: scales tile A as a task
 * with an inout dependence on the full tile; skipped when the sequence has
 * already recorded an error.
 */
void plasma_core_omp_slascl(plasma_enum_t uplo,
                            float cfrom, float cto,
                            int m, int n,
                            float *A, int lda,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_slascl(uplo, cfrom, cto, m, n, A, lda);
    }
}
graph.h
//MIT License // //Copyright (c) 2021 Mobius Authors //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. 
//from https://github.com/sunbelbd/mobius/blob/e2d166547d61d791da8f06747a63b9cd38f02c71/graph.h #pragma once #include<vector> #include<algorithm> #include<queue> #include<stdlib.h> #include<random> #include<unordered_set> #include<mutex> #include<time.h> #include"config.h" #include"data.h" #ifdef OMP #include<omp.h> #endif typedef unsigned int vl_type; class VisitedList { public: vl_type curV; vl_type *mass; unsigned int numelements; VisitedList(int numelements1) { curV = 1; numelements = numelements1; mass = new vl_type[numelements]; memset(mass, 0, sizeof(vl_type) * numelements); } void reset() { ++curV; if (curV == 0) { curV = 1; memset(mass, 0, sizeof(vl_type) * numelements); } }; ~VisitedList() { delete mass; } }; struct GraphMeasures{ int distance_cnt = 0; }; class GraphWrapper{ public: virtual void add_vertex(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point) = 0; virtual void add_vertex_lock(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point) = 0; virtual void search_top_k(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result) = 0; virtual void search_top_k_with_score(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result,std::vector<double>& score){} virtual void dump(std::string path = "bfsg.graph") = 0; virtual void load(std::string path = "bfsg.graph") = 0; virtual ~GraphWrapper(){} virtual void set_construct_pq_size(int size){}; GraphMeasures measures; }; template<const int dist_type> class FixedDegreeGraph : public GraphWrapper{ private: const int degree = SEARCH_DEGREE; const int flexible_degree = FIXED_DEGREE; const int vertex_offset_shift = FIXED_DEGREE_SHIFT; std::vector<idx_t> edges; std::vector<dist_t> edge_dist; Data* data; std::mt19937_64 rand_gen = std::mt19937_64(1234567);//std::random_device{}()); std::vector<std::mutex> edge_mutex;//do not push back on this vector, it will destroy the mutex bool debug = false; VisitedList* p_visited = NULL; #ifdef OMP 
std::vector<VisitedList*> visited_pool; #endif int construct_pq_size = CONSTRUCT_SEARCH_BUDGET; void rank_and_switch_ordered(idx_t v_id,idx_t u_id){ //We assume the neighbors of v_ids in edges[offset] are sorted //by the distance to v_id ascendingly when it is full //NOTICE: before it is full, it is unsorted auto curr_dist = pair_distance(v_id,u_id); auto offset = ((size_t)v_id) << vertex_offset_shift; int degree = edges[offset]; std::vector<idx_t> neighbor; neighbor.reserve(degree + 1); for(int i = 0;i < degree;++i) neighbor.push_back(edges[offset + i + 1]); neighbor.push_back(u_id); neighbor = edge_selection_filter_neighbor(neighbor,v_id,flexible_degree); edges[offset] = neighbor.size(); for(int i = 0;i < neighbor.size();++i) edges[offset + i + 1] = neighbor[i]; return; //We assert edges[offset] > 0 here if(curr_dist >= edge_dist[offset + edges[offset]]){ return; } edges[offset + edges[offset]] = u_id; edge_dist[offset + edges[offset]] = curr_dist; for(size_t i = offset + edges[offset] - 1;i > offset;--i){ if(edge_dist[i] > edge_dist[i + 1]){ std::swap(edges[i],edges[i + 1]); std::swap(edge_dist[i],edge_dist[i + 1]); }else{ break; } } } void rank_and_switch(idx_t v_id,idx_t u_id){ rank_and_switch_ordered(v_id,u_id); //TODO: //Implement an unordered version to compare with } template<class T> dist_t distance(idx_t a,T& b){ if(dist_type == 0) return data->l2_distance(a,b); else if(dist_type == 1) return data->negative_inner_prod_distance(a,b); else if(dist_type == 2) return data->negative_cosine_distance(a,b); else if(dist_type == 3) return data->l2_distance(a,b); else if(dist_type == 4) return data->ipwrap_l2_build_distance(a,b); else if(dist_type == 5) return data->ipwrap_l2_query_distance(a,b); else{ // should not happen fprintf(stderr,"unsupported dist_type %d\n",dist_type); return 0; } } void compute_distance_naive(size_t offset,std::vector<dist_t>& dists){ dists.resize(edges[offset]); auto degree = edges[offset]; for(int i = 0;i < degree;++i){ dists[i] = 
distance(offset >> vertex_offset_shift,edges[offset + i + 1]); } } void compute_distance(size_t offset,std::vector<dist_t>& dists){ compute_distance_naive(offset,dists); } template<class T> dist_t pair_distance_naive(idx_t a,T& b){ ++measures.distance_cnt; return distance(a,b); } template<class T> dist_t pair_distance(idx_t a,T& b){ return pair_distance_naive(a,b); } void qsort(size_t l,size_t r){ auto mid = (l + r) >> 1; int i = l,j = r; auto k = edge_dist[mid]; do{ while(edge_dist[i] < k) ++i; while(k < edge_dist[j]) --j; if(i <= j){ std::swap(edge_dist[i],edge_dist[j]); std::swap(edges[i],edges[j]); ++i; --j; } }while(i <= j); if(i < r)qsort(i,r); if(l < j)qsort(l,j); } void rank_edges(size_t offset){ std::vector<dist_t> dists; compute_distance(offset,dists); for(int i = 0;i < dists.size();++i) edge_dist[offset + i + 1] = dists[i]; qsort(offset + 1,offset + dists.size()); //TODO: //use a heap in the edge_dist } void add_edge_lock(idx_t v_id,idx_t u_id){ edge_mutex[v_id].lock(); auto offset = ((size_t)v_id) << vertex_offset_shift; if(edges[offset] < flexible_degree){ ++edges[offset]; edges[offset + edges[offset]] = u_id; }else{ rank_and_switch(v_id,u_id); } edge_mutex[v_id].unlock(); } void add_edge(idx_t v_id,idx_t u_id){ auto offset = ((size_t)v_id) << vertex_offset_shift; if(edges[offset] < flexible_degree){ ++edges[offset]; edges[offset + edges[offset]] = u_id; }else{ rank_and_switch(v_id,u_id); } } public: long long total_explore_cnt = 0; int total_explore_times = 0; size_t search_start_point = 0; bool ignore_startpoint = false; FixedDegreeGraph(Data* data) : data(data){ auto num_vertices = data->max_vertices(); edges = std::vector<idx_t>(((size_t)num_vertices) << vertex_offset_shift); edge_dist = std::vector<dist_t>(((size_t)num_vertices) << vertex_offset_shift); edge_mutex = std::vector<std::mutex>(num_vertices); p_visited = new VisitedList(num_vertices + 5); #ifdef OMP int n_threads = 1; #pragma omp parallel #pragma omp master { n_threads = 
omp_get_num_threads(); } visited_pool.resize(n_threads); for(int i = 0;i < n_threads;++i) visited_pool[i] = new VisitedList(num_vertices + 5); #endif } void set_construct_pq_size(int size){ construct_pq_size = size; } std::vector<idx_t> edge_selection_filter_neighbor(std::vector<idx_t>& neighbor,idx_t vertex_id,int desired_size){ std::vector<idx_t> filtered_neighbor; std::vector<dist_t> dists(neighbor.size()); for(int i = 0;i < dists.size();++i) dists[i] = pair_distance(vertex_id,neighbor[i]); std::vector<int> idx(neighbor.size()); for(int i = 0;i < idx.size();++i) idx[i] = i; std::sort(idx.begin(),idx.end(),[&](int a,int b){return dists[a] < dists[b];}); for(int i = 0;i < idx.size();++i){ dist_t cur_dist = dists[idx[i]]; bool pass = true; for(auto neighbor_id : filtered_neighbor){ if(cur_dist > pair_distance(neighbor_id,neighbor[idx[i]])){ pass = false; break; } } if(pass){ filtered_neighbor.push_back(neighbor[idx[i]]); if(filtered_neighbor.size() >= desired_size) break; }else{ } } return std::move(filtered_neighbor); } void add_vertex_lock(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point){ std::vector<idx_t> neighbor; search_top_k_lock(point,construct_pq_size,neighbor); auto offset = ((size_t)vertex_id) << vertex_offset_shift; int num_neighbors = degree < neighbor.size() ? 
degree : neighbor.size(); edge_mutex[vertex_id].lock(); // TODO: // it is possible to save this space --- edges[offset] // by set the last number in the range as // a large number - current degree if(neighbor.size() >= degree) neighbor = edge_selection_filter_neighbor(neighbor,vertex_id,degree); edges[offset] = neighbor.size(); for(int i = 0;i < neighbor.size() && i < degree;++i){ edges[offset + i + 1] = neighbor[i]; } edge_mutex[vertex_id].unlock(); for(int i = 0;i < neighbor.size() && i < degree;++i){ add_edge_lock(neighbor[i],vertex_id); } } void add_vertex(idx_t vertex_id,std::vector<std::pair<int,value_t>>& point){ std::vector<idx_t> neighbor; search_top_k(point,construct_pq_size,neighbor); auto offset = ((size_t)vertex_id) << vertex_offset_shift; int num_neighbors = degree < neighbor.size() ? degree : neighbor.size(); // TODO: // it is possible to save this space --- edges[offset] // by set the last number in the range as // a large number - current degree if(neighbor.size() >= degree){ neighbor = edge_selection_filter_neighbor(neighbor,vertex_id,degree); } edges[offset] = neighbor.size(); for(int i = 0;i < neighbor.size() && i < degree;++i){ edges[offset + i + 1] = neighbor[i]; } for(int i = 0;i < neighbor.size() && i < degree;++i){ add_edge(neighbor[i],vertex_id); } } void astar_multi_start_search_lock(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){ std::priority_queue<std::pair<dist_t,idx_t>,std::vector<std::pair<dist_t,idx_t>>,std::greater<std::pair<dist_t,idx_t>>> q; const int num_start_point = 1; auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); #ifdef OMP int tid = omp_get_thread_num(); auto& p_visited = visited_pool[tid]; #endif p_visited->reset(); auto tag = p_visited->curV; for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ auto start = search_start_point;//rand_gen() % data->curr_vertices(); if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); } std::priority_queue<std::pair<dist_t,idx_t>> topk; const int max_step = 1000000; bool found_min_node = false; dist_t min_dist = 1e100; int explore_cnt = 0; for(int iter = 0;iter < max_step && !q.empty();++iter){ auto now = q.top(); if(topk.size() == k && topk.top().first < now.first){ break; } ++explore_cnt; min_dist = std::min(min_dist,now.first); q.pop(); if(ignore_startpoint == false || iter != 0) topk.push(now); if(topk.size() > k) topk.pop(); edge_mutex[now.second].lock(); auto offset = ((size_t)now.second) << vertex_offset_shift; auto degree = edges[offset]; for(int i = 0;i < degree;++i){ auto start = edges[offset + i + 1]; if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; auto dist = pair_distance_naive(start,converted_query); if(topk.empty() || dist < topk.top().first || topk.size() < k) q.push(std::make_pair(dist,start)); } edge_mutex[now.second].unlock(); } total_explore_cnt += explore_cnt; ++total_explore_times; result.resize(topk.size()); int i = result.size() - 1; while(!topk.empty()){ result[i] = (topk.top().second); topk.pop(); --i; } } void astar_no_heap_search(const std::vector<std::pair<int,value_t>>& query,std::vector<idx_t>& result){ const int num_start_point = 1; std::pair<dist_t,idx_t> q_top = std::make_pair(10000000000,0); auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); p_visited->reset(); auto tag = p_visited->curV; for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ auto start = search_start_point;//rand_gen() % data->curr_vertices(); p_visited->mass[start] = tag; if(ignore_startpoint == false){ q_top = (std::make_pair(pair_distance_naive(start,converted_query),start)); }else{ auto offset = ((size_t)start) << vertex_offset_shift; auto degree = edges[offset]; for(int i = 1;i <= degree;++i){ p_visited->mass[edges[offset + i]] = tag; auto dis = pair_distance_naive(edges[offset + i],converted_query); if(dis < q_top.first) q_top = (std::make_pair(dis,start)); } } } const int max_step = 1000000; bool found_min_node = false; dist_t min_dist = 1e100; int explore_cnt = 0; for(int iter = 0;iter < max_step;++iter){ ++explore_cnt; auto offset = ((size_t)q_top.second) << vertex_offset_shift; auto degree = edges[offset]; bool changed = false; for(int i = 0;i < degree;++i){ auto start = edges[offset + i + 1]; if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; auto dist = pair_distance_naive(start,converted_query); if(dist < q_top.first){ q_top = (std::make_pair(dist,start)); changed = true; } } if(changed == false) break; } total_explore_cnt += explore_cnt; ++total_explore_times; result.resize(1); result[0] = q_top.second; } void astar_multi_start_search_with_score(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result,std::vector<double>& score){ std::priority_queue<std::pair<dist_t,idx_t>,std::vector<std::pair<dist_t,idx_t>>,std::greater<std::pair<dist_t,idx_t>>> q; const int num_start_point = 1; auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); p_visited->reset(); auto tag = p_visited->curV; for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ auto start = search_start_point;//rand_gen() % data->curr_vertices(); if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); } std::priority_queue<std::pair<dist_t,idx_t>> topk; const int max_step = 1000000; bool found_min_node = false; dist_t min_dist = 1e100; int explore_cnt = 0; for(int iter = 0;iter < max_step && !q.empty();++iter){ auto now = q.top(); if(topk.size() == k && topk.top().first < now.first){ break; } ++explore_cnt; min_dist = std::min(min_dist,now.first); q.pop(); if(ignore_startpoint == false || iter != 0) topk.push(now); if(topk.size() > k) topk.pop(); auto offset = ((size_t)now.second) << vertex_offset_shift; auto degree = edges[offset]; for(int i = 0;i < degree;++i){ auto start = edges[offset + i + 1]; if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; auto dist = pair_distance_naive(start,converted_query); if(topk.empty() || dist < topk.top().first || topk.size() < k) q.push(std::make_pair(dist,start)); } } total_explore_cnt += explore_cnt; ++total_explore_times; result.resize(topk.size()); score.resize(topk.size()); int i = result.size() - 1; while(!topk.empty()){ result[i] = (topk.top().second); score[i] = -(topk.top().first); topk.pop(); --i; } } void astar_multi_start_search(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){ std::priority_queue<std::pair<dist_t,idx_t>,std::vector<std::pair<dist_t,idx_t>>,std::greater<std::pair<dist_t,idx_t>>> q; const int num_start_point = 1; auto converted_query = dist_type == 3 ? 
data->organize_point_mobius(query) : data->organize_point(query); p_visited->reset(); auto tag = p_visited->curV; for(int i = 0;i < num_start_point && i < data->curr_vertices();++i){ auto start = search_start_point;//rand_gen() % data->curr_vertices(); if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; q.push(std::make_pair(pair_distance_naive(start,converted_query),start)); } std::priority_queue<std::pair<dist_t,idx_t>> topk; const int max_step = 1000000; bool found_min_node = false; dist_t min_dist = 1e100; int explore_cnt = 0; for(int iter = 0;iter < max_step && !q.empty();++iter){ auto now = q.top(); if(topk.size() == k && topk.top().first < now.first){ break; } ++explore_cnt; min_dist = std::min(min_dist,now.first); q.pop(); if(ignore_startpoint == false || iter != 0) topk.push(now); if(topk.size() > k) topk.pop(); auto offset = ((size_t)now.second) << vertex_offset_shift; auto degree = edges[offset]; for(int i = 0;i < degree;++i){ auto start = edges[offset + i + 1]; if(p_visited->mass[start] == tag) continue; p_visited->mass[start] = tag; auto dist = pair_distance_naive(start,converted_query); if(topk.empty() || dist < topk.top().first || topk.size() < k) q.push(std::make_pair(dist,start)); } } total_explore_cnt += explore_cnt; ++total_explore_times; result.resize(topk.size()); int i = result.size() - 1; while(!topk.empty()){ result[i] = (topk.top().second); topk.pop(); --i; } } void search_top_k(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){ if(k == 1) astar_no_heap_search(query,result); else astar_multi_start_search(query,k,result); } void search_top_k_with_score(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result,std::vector<double>& score){ astar_multi_start_search_with_score(query,k,result,score); } void search_top_k_lock(const std::vector<std::pair<int,value_t>>& query,int k,std::vector<idx_t>& result){ astar_multi_start_search_lock(query,k,result); } void 
print_stat(){ auto n = data->max_vertices(); size_t sum = 0; std::vector<size_t> histogram(2 * degree + 1,0); for(size_t i = 0;i < n;++i){ sum += edges[i << vertex_offset_shift]; int tmp = edges[i << vertex_offset_shift]; if(tmp > 2 * degree + 1) fprintf(stderr,"[ERROR] node %zu has %d degree\n",i,tmp); ++histogram[edges[i << vertex_offset_shift]]; if(tmp != degree) fprintf(stderr,"[INFO] %zu has degree %d\n",i,tmp); } fprintf(stderr,"[INFO] #vertices %zu, avg degree %f\n",n,sum * 1.0 / n); std::unordered_set<idx_t> visited; fprintf(stderr,"[INFO] degree histogram:\n"); for(int i = 0;i <= 2 * degree + 1;++i) fprintf(stderr,"[INFO] %d:\t%zu\n",i,histogram[i]); } void print_edges(int x){ for(size_t i = 0;i < x;++i){ size_t offset = i << vertex_offset_shift; int degree = edges[offset]; fprintf(stderr,"%d (%d): ",i,degree); for(int j = 1;j <= degree;++j) fprintf(stderr,"(%zu,%f) ",edges[offset + j],edge_dist[offset + j]); fprintf(stderr,"\n"); } } void dump(std::string path = "bfsg.graph"){ FILE* fp = fopen(path.c_str(),"wb"); size_t num_vertices = data->max_vertices(); fwrite(&edges[0],sizeof(edges[0]) * (num_vertices << vertex_offset_shift),1,fp); fclose(fp); } void load(std::string path = "bfsg.graph"){ FILE* fp = fopen(path.c_str(),"rb"); size_t num_vertices = data->max_vertices(); auto cnt = fread(&edges[0],sizeof(edges[0]) * (num_vertices << vertex_offset_shift),1,fp); fclose(fp); } Data* get_data(){ return data; } };
tree_utils.h
//
// Created by Atharva on 6/26/20.
//

#ifndef TREE_TRAVERSALS_TREE_UTILS_H
#define TREE_TRAVERSALS_TREE_UTILS_H

#include <malloc.h>
#include <iostream>
#include <omp.h>

using namespace std;

// A fixed-fanout tree node. `pointers` holds `children` child pointers,
// or nullptr for leaf nodes created at the maximum depth.
struct node{
    int data;
    int children;
    struct node** pointers;
};

typedef struct node Node;

// Allocate a node; nodes created at depth >= 7 are leaves (no child array).
// Bug fixed: the original stored the address of a stack-local temporary in
// node->pointers (`&arr`, dangling once this function returns) and sized the
// allocation in whole Node objects instead of Node* pointers.
Node* createNode(int Data, int children, int depth)
{
    Node* node = new Node();
    node->data = Data;
    node->children = children;
    if (depth < 7) {
        node->pointers = (Node**) malloc(children * sizeof(Node*));
    } else {
        node->pointers = nullptr;
    }
    return node;
}

// Recursively attach children until depth 7, the same cutoff createNode uses.
// Guards against leaf nodes whose child array was never allocated.
void populateChildren(Node* node, int depth)
{
    if (node == nullptr || node->pointers == nullptr || depth == 7)
        return;
    #pragma omp parallel for
    for (int i = 0; i < node->children; i++) {
        int current_depth = depth;
        node->pointers[i] = createNode(int(rand()), 3, current_depth + 1);
        populateChildren(node->pointers[i], current_depth + 1);
    }
}

// Build the demo tree: a 5-ary root filled out with 3-ary descendants.
Node* createTree()
{
    Node* head = createNode(8, 5, 0);
    populateChildren(head, 0);
    return head;
}

// Search the tree in parallel, printing the address of every matching node.
// Leaf nodes (pointers == nullptr) are no longer dereferenced.
void parallel_tree_search(Node* head, int query)
{
    if (head == nullptr)
        return;
    if (head->data == query) {
        cout << "Query found at" << head;
        cout << '\n';
    }
    if (head->pointers == nullptr)
        return;
    #pragma omp parallel for
    for (int i = 0; i < head->children; i++) {
        parallel_tree_search(head->pointers[i], query);
    }
}

#endif //TREE_TRAVERSALS_TREE_UTILS_H
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/distribute-cache-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/nt-base-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/policy.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/utility.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const IndexPacket *GetVirtualIndexesFromCache(const Image *); static const PixelPacket *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t, PixelPacket *,ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), ReadPixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *), ReadPixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *); static PixelPacket *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static volatile MagickBooleanType instantiate_cache = MagickFalse; static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. 
%
%  The format of the AcquirePixelCache() method is:
%
%      Cache AcquirePixelCache(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *restrict cache_info;

  char
    *synchronize;

  /*
    Allocate one zero-filled CacheInfo so every field starts in a known state
    before the explicit initialization below.
  */
  cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->channels=4;
  cache_info->file=(-1);  /* no disk backing file yet */
  cache_info->id=GetMagickThreadId();
  /*
    Reserve at least one nexus per potential worker: the maximum of the
    caller's request, the OpenMP thread count, and the thread resource limit.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* MAGICK_SYNCHRONIZE opts in to synchronized disk-cache I/O. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  cache_info->semaphore=AllocateSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AllocateSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickSignature;
  return((Cache ) cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
%  The format of the AcquirePixelCacheNexus method is:
%
%      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **restrict nexus_info;

  register ssize_t
    i;

  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    All NexusInfo structures come from one allocation anchored at
    nexus_info[0]; DestroyPixelCacheNexus() releases them the same way.
  */
  nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
    sizeof(**nexus_info));
  if (nexus_info[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    nexus_info[i]=(&nexus_info[0][i]);
    nexus_info[i]->signature=MagickSignature;
  }
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e P i x e l C a c h e P i x e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the AcquirePixelCachePixels() method is:
%
%      const void *AcquirePixelCachePixels(const Image *image,
%        MagickSizeType *length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  (void) exception;
  *length=0;
  /* Only in-core caches expose a directly addressable pixel buffer. */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((const void *) NULL);
  *length=cache_info->length;
  return((const void *) cache_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C a c h e C o m p o n e n t G e n e s i s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentGenesis() instantiates the cache component.
%
%  The format of the CacheComponentGenesis method is:
%
%      MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
  /* Create the component-wide semaphore once, at library start-up. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AllocateSemaphoreInfo();
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C a c h e C o m p o n e n t T e r m i n u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentTerminus() destroys the cache component.
%
%  The format of the CacheComponentTerminus() method is:
%
%      CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
  /* Re-activate the semaphore if it was never created so it can be locked
     and then destroyed on a single code path. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  LockSemaphoreInfo(cache_semaphore);
  instantiate_cache=MagickFalse;
  UnlockSemaphoreInfo(cache_semaphore);
  DestroySemaphoreInfo(&cache_semaphore);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l i p P i x e l C a c h e N e x u s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
%  mask.  The method returns MagickTrue if the pixel region is clipped,
%  otherwise MagickFalse.
%
%  The format of the ClipPixelCacheNexus() method is:
%
%      MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickSizeType
    number_pixels;

  NexusInfo
    **restrict clip_nexus,
    **restrict image_nexus;

  register const PixelPacket
    *restrict r;

  register IndexPacket
    *restrict nexus_indexes,
    *restrict indexes;

  register PixelPacket
    *restrict p,
    *restrict q;

  register ssize_t
    i;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->clip_mask == (Image *) NULL) ||
      (image->storage_class == PseudoClass))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  image_nexus=AcquirePixelCacheNexus(1);
  clip_nexus=AcquirePixelCacheNexus(1);
  if ((image_nexus == (NexusInfo **) NULL) ||
      (clip_nexus == (NexusInfo **) NULL))
    ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
  /* p: authentic pixels; q: nexus pixels; r: clip-mask pixels (read-only). */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,image_nexus[0],
    exception);
  indexes=image_nexus[0]->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],exception);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    /* Copy a pixel only where the mask intensity exceeds mid-scale. */
    if (GetPixelIntensity(image,r) > (QuantumRange/2))
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        SetPixelOpacity(q,GetPixelOpacity(p));
        if (cache_info->active_index_channel != MagickFalse)
          SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
      }
    p++;
    q++;
    r++;
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  /* An early break above means a pixel source went missing. */
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCache() clones a pixel cache.
%
%  The format of the ClonePixelCache() method is:
%
%      Cache ClonePixelCache(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *restrict clone_info;

  const CacheInfo
    *restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Only the configuration is cloned here (thread count, virtual pixel
    method) — no pixel data is copied by this method.
  */
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  if (clone_info == (Cache) NULL)
    return((Cache) NULL);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e M e t h o d s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
%  another.
%
%  The format of the ClonePixelCacheMethods() method is:
%
%      void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
%  A description of each parameter follows:
%
%    o clone: Specifies a pointer to a Cache structure.
%
%    o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *restrict cache_info,
    *restrict source_info;

  /*
    Note the naming inversion below: `clone` (the destination) is held in
    source_info, while `cache` (the method source) is held in cache_info.
  */
  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  source_info->methods=cache_info->methods;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e R e p o s i t o r y                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheRepository() clones the source pixel cache to the destination
%  cache.
%
%  The format of the ClonePixelCacheRepository() method is:
%
%      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
%        CacheInfo *source_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o source_info: the source pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Copy number_pixels pixels; large copies go parallel when OpenMP is enabled
  and the quantum depth makes per-pixel assignment worthwhile.
*/
static inline void CopyPixels(PixelPacket *destination,
  const PixelPacket *source,const MagickSizeType number_pixels)
{
#if !defined(MAGICKCORE_OPENMP_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH <= 8)
  (void) memcpy(destination,source,(size_t) number_pixels*sizeof(*source));
#else
  {
    register MagickOffsetType
      i;

    /* Small buffers: a single memcpy beats thread start-up cost. */
    if ((number_pixels*sizeof(*source)) < MagickMaxBufferExtent)
      {
        (void) memcpy(destination,source,(size_t) number_pixels*
          sizeof(*source));
        return;
      }
    #pragma omp parallel for
    for (i=0; i < (MagickOffsetType) number_pixels; i++)
      destination[i]=source[i];
  }
#endif
}

static inline MagickSizeType MagickMin(const MagickSizeType x,
  const MagickSizeType y)
{
  if (x < y)
    return(x);
  return(y);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *restrict clone_info,CacheInfo *restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads  2
#define cache_threads(source,destination,chunk) \
  num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \
    GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
    GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)

  MagickBooleanType
    status;

  NexusInfo
    **restrict cache_nexus,
    **restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* Ping caches carry no pixels: nothing to clone. */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->active_index_channel == clone_info->active_index_channel))
    {
      /*
        Identical pixel cache morphology.
      */
      CopyPixels(clone_info->pixels,cache_info->pixels,cache_info->columns*
        cache_info->rows);
      if ((cache_info->active_index_channel != MagickFalse) &&
          (clone_info->active_index_channel != MagickFalse))
        (void) memcpy(clone_info->indexes,cache_info->indexes,
          cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes));
      return(MagickTrue);
    }
  /*
    Mismatched pixel cache morphology: copy row by row through nexus
    buffers, clipping to the narrower of the two caches.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  if ((cache_nexus == (NexusInfo **) NULL) ||
      (clone_nexus == (NexusInfo **) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
    sizeof(*cache_info->pixels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_threads(cache_info,clone_info,cache_info->rows)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    PixelPacket
      *pixels;

    RectangleInfo
      region;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    region.width=cache_info->columns;
    region.height=1;
    region.x=0;
    region.y=y;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
      cache_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    region.width=clone_info->columns;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue,
      clone_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    /* Zero first: the destination row may be wider than the copied span. */
    (void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
      clone_nexus[id]->length);
    (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->active_index_channel != MagickFalse) &&
      (clone_info->active_index_channel != MagickFalse))
    {
      /*
        Clone indexes.
      */
      length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
        sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_threads(cache_info,clone_info,cache_info->rows)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        PixelPacket
          *pixels;

        RectangleInfo
          region;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        region.width=cache_info->columns;
        region.height=1;
        region.x=0;
        region.y=y;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
          cache_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        region.width=clone_info->columns;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue,
          clone_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
        status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y I m a g e P i x e l C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixelCache() deallocates memory associated with the pixel cache.
% % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache == (void *) NULL) return; image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. 
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/

/*
  Close the disk-cache file descriptor and return its file resource.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  Release the pixel storage of a cache according to its backing type and
  return the corresponding resource accounting.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        {
          (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
          cache_info->pixels=(PixelPacket *) NULL;
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above — MapCache appears to fall through to
       DiskCache on purpose (a memory-mapped cache is backed by a disk file
       that must also be closed and relinquished); confirm against upstream
       before "fixing" this. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}

MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Reference counted: the cache is only torn down when the last reference
    is released.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature so stale pointers are caught by asserts. */
  cache_info->signature=(~MagickSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
%
*/

/*
  Release a nexus staging buffer, whether heap-allocated or memory-mapped.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped == MagickFalse)
    (void) RelinquishAlignedMemory(nexus_info->cache);
  else
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}

MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    if (nexus_info[i]->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickSignature);
  }
  /*
    All NexusInfo structures share the single allocation anchored at
    nexus_info[0] (see AcquirePixelCacheNexus); free it, then the pointer
    array itself.
  */
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c I n d e x e s F r o m C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticIndexesFromCache() returns the indexes associated with the last
%  call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
%  The format of the GetAuthenticIndexesFromCache() method is:
%
%      IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  /* Each OpenMP thread owns its own nexus; no locking required here. */
  return(cache_info->nexus_info[id]->indexes);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c I n d e x Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticIndexQueue() returns the authentic black channel or the colormap
%  indexes associated with the last call to QueueAuthenticPixels() or
%  GetVirtualPixels().  NULL is returned if the black channel or colormap
%  indexes are not available.
%
%  The format of the GetAuthenticIndexQueue() method is:
%
%      IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Delegate to an installed handler when one is registered. */
  if (cache_info->methods.get_authentic_indexes_from_handler !=
       (GetAuthenticIndexesFromHandler) NULL)
    return(cache_info->methods.get_authentic_indexes_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->indexes);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l C a c h e N e x u s                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.   A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  PixelPacket
    *restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  /* Map the region onto the nexus (may alias the cache directly). */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Direct view into the pixel cache: no staging read is needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((PixelPacket *) NULL);
  if (cache_info->active_index_channel != MagickFalse)
    if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)
      return((PixelPacket *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l s F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Return the pixels staged in this thread's cache nexus. */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated with the
%  last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Prefer an installed cache-method handler when one is registered. */
  if (cache_info->methods.get_authentic_pixels_from_handler !=
       (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.
If the
%  region is successfully accessed, a pointer to a PixelPacket array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or if the storage class is
%  PseudoClass, call GetAuthenticIndexQueue() after invoking
%  GetAuthenticPixels() to obtain the black color component or colormap indexes
%  (of type IndexPacket) corresponding to the region.  Once the PixelPacket
%  (and/or IndexPacket) array has been updated, the changes must be saved back
%  to the underlying image using SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Delegate to an installed handler if one is registered. */
  if (cache_info->methods.get_authentic_pixels_handler !=
       (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  /* Otherwise service the request from this thread's cache nexus. */
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    NOTE(review): this NULL check is redundant with the assert above when
    assertions are enabled, but serves as a release-build guard — confirm
    before removing.
  */
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels associated with the
%  last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Return MagickTrue only when the image attributes still match the cache
  attributes captured when the cache was opened, and the per-thread nexus
  array exists.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *restrict image)
{
  CacheInfo
    *restrict cache_info;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide throttle/time-limit state, lazily initialized on first call.
    NOTE(review): these statics are read/written under image->semaphore only —
    confirm all callers hold no conflicting lock.
  */
  static MagickSizeType
    cpu_throttle = 0,
    cycles = 0,
    time_limit = 0;

  static time_t
    cache_timestamp = 0;

  status=MagickTrue;
  LockSemaphoreInfo(image->semaphore);
  /* Optionally delay to honor the CPU throttle resource limit. */
  if (cpu_throttle == 0)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != MagickResourceInfinity) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (time_limit == 0)
    {
      /*
        Set the expire time in seconds.
      */
      time_limit=GetMagickResourceLimit(TimeResource);
      cache_timestamp=time((time_t *) NULL);
    }
  /* Abort the whole operation when the time resource limit is exceeded. */
  if ((time_limit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_timestamp) >= time_limit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared or read-only, clone it so this
    image owns a private, writable copy.  The reference count is re-checked
    under the cache semaphore (double-checked locking).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status != MagickFalse)
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status != MagickFalse)
                {
                  /*
                    The last reference hands its nexus array to the clone;
                    clear it so DestroyPixelCache below does not free it.
                  */
                  if (cache_info->reference_count == 1)
                    cache_info->nexus_info=(NexusInfo **) NULL;
                  destroy=MagickTrue;
                  image->cache=clone_image.cache;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MapCache, MemoryCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
/* Deprecated alias retained for backward compatibility. */
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  return(GetImagePixelCacheType(image));
}

MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  PixelPacket
    *restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Preload the background color so an error path returns a defined value. */
  *pixel=image->background_color;
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
       (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l F r o m C a c h e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image *image,
%        const ssize_t x,const ssize_t y,PixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Preload the background color so an error path returns a defined value. */
  *pixel=image->background_color;
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l M a g i c k P i x e l                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const IndexPacket
    *restrict indexes;

  register const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  /* Initialize the result to defined values before checking for failure. */
  GetMagickPixelPacket(image,pixel);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
  SetMagickPixelPacket(image,pixels,indexes,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l M e t h o d P i x e l                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
%  location as defined by specified pixel method.  The image background color
%  is returned if an error occurs.  If you plan to modify the pixel, use
%  GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Preload the background color so an error path returns a defined value. */
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
       (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Preload the background color so an error path returns a defined value. */
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
       (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  /* Preload the background color so an error path returns a defined value. */
  *pixel=image->background_color;
  pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e C h a n n e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheChannels() returns the number of pixel channels associated
%  with this instance of the pixel cache.
%
%  The format of the GetPixelCacheChannels() method is:
%
%      size_t GetPixelCacheChannels(Cache cache)
%
%  A description of each parameter follows:
%
%    o type: GetPixelCacheChannels returns DirectClass or PseudoClass.
%
%    o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->channels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the class type of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      Colorspace GetPixelCacheColorspace(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheMethods() initializes the CacheMethods structure.
%
%  The format of the GetPixelCacheMethods() method is:
%
%      void GetPixelCacheMethods(CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  /*
    Install the default in-process handlers; callers may override individual
    entries afterwards (e.g. for a remote pixel cache).
  */
  (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_indexes_from_handler=
    GetAuthenticIndexesFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated with
%  the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  MagickSizeType
    extent;

  /*
    Use the file-wide assert convention (cache != (Cache) NULL) rather than
    the bare NULL the original had here.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  /*
    A zero-area nexus region means no region has been selected yet; in that
    case report the extent of the entire image cache instead.
  */
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  (void) exception;
  *length=0;
  /*
    Only memory-resident caches (heap or memory-mapped) expose a directly
    addressable pixel buffer; disk caches return NULL.
  */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  *length=cache_info->length;
  return((void *) cache_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e S t o r a g e C l a s s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
%  The format of the GetPixelCacheStorageClass() method is:
%
%      ClassType GetPixelCacheStorageClass(Cache cache)
%
%  A description of each parameter follows:
%
%    o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
%    o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e T i l e S i z e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheTileSize() returns the pixel cache tile size.
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimize cache tile width in pixels.
%
%    o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  size_t
    tile_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Square tiles: a 2KB row for in-memory caches, an 8KB row for disk-backed
    caches (fewer, larger I/O transfers).
  */
  tile_size=2048UL/sizeof(PixelPacket);
  if (GetImagePixelCacheType(image) == DiskCache)
    tile_size=8192UL/sizeof(PixelPacket);
  *width=tile_size;
  *height=tile_size;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  return(cache_info->virtual_pixel_method);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l I n d e x e s F r o m C a c h e                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualIndexesFromCache() returns the indexes associated with the last
%  call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualIndexesFromCache() method is:
%
%      IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  /* Each OpenMP thread owns a dedicated nexus, so no locking is needed. */
  return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l I n d e x e s F r o m N e x u s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualIndexesFromNexus() returns the indexes associated with the
%  specified cache nexus.
%
%  The format of the GetVirtualIndexesFromNexus() method is:
%
%      const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  /* An undefined storage class means the cache holds no pixels yet. */
  if (cache_info->storage_class == UndefinedClass)
    return((IndexPacket *) NULL);
  return(nexus_info->indexes);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l I n d e x Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualIndexQueue() returns the virtual black channel or the
%  colormap indexes associated with the last call to QueueAuthenticPixels() or
%  GetVirtualPixels().  NULL is returned if the black channel or colormap
%  indexes are not available.
%
%  The format of the GetVirtualIndexQueue() method is:
%
%      const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Honor an installed cache-method override before the default path. */
  if (cache_info->methods.get_virtual_indexes_from_handler !=
       (GetVirtualIndexesFromHandler) NULL)
    return(cache_info->methods.get_virtual_indexes_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelsFromNexus() method is:
%
%      PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } /* VirtualPixelModulo() computes the remainder of dividing offset by extent. It returns not only the quotient (tile the offset falls in) but also the positive remainer within that tile such that 0 <= remainder < extent. This method is essentially a ldiv() using a floored modulo division rather than the normal default truncated modulo division. 
*/ static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/(ssize_t) extent; if (offset < 0L) modulo.quotient--; modulo.remainder=offset-modulo.quotient*(ssize_t) extent; return(modulo); } MagickExport const PixelPacket *GetVirtualPixelsFromNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *restrict cache_info; IndexPacket virtual_index; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo **restrict virtual_nexus; PixelPacket *restrict pixels, virtual_pixel; RectangleInfo region; register const IndexPacket *restrict virtual_indexes; register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t u, v; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->type == UndefinedCache) return((const PixelPacket *) NULL); region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ? 
MagickTrue : MagickFalse,nexus_info,exception); if (pixels == (PixelPacket *) NULL) return((const PixelPacket *) NULL); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); if ((cache_info->storage_class == PseudoClass) || (cache_info->colorspace == CMYKColorspace)) { status=ReadPixelCacheIndexes(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); } return(pixels); } /* Pixel request is outside cache extents. 
*/ q=pixels; indexes=nexus_info->indexes; virtual_nexus=AcquirePixelCacheNexus(1); if (virtual_nexus == (NexusInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "UnableToGetCacheNexus","`%s'",image->filename); return((const PixelPacket *) NULL); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case GrayVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange/2); SetPixelGreen(&virtual_pixel,QuantumRange/2); SetPixelBlue(&virtual_pixel,QuantumRange/2); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case TransparentVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,TransparentOpacity); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange); SetPixelGreen(&virtual_pixel,QuantumRange); SetPixelBlue(&virtual_pixel,QuantumRange); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } default: { virtual_pixel=image->background_color; break; } } virtual_index=0; for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) 
y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, 
*virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelsFromNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } } if (p == (const PixelPacket *) NULL) break; *q++=(*p); if ((indexes != (IndexPacket *) NULL) && (virtual_indexes != (const IndexPacket *) NULL)) *indexes++=(*virtual_indexes); continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,*virtual_nexus,exception); if (p == (const PixelPacket *) NULL) break; virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus); (void) memcpy(q,p,(size_t) length*sizeof(*p)); q+=length; if ((indexes != (IndexPacket *) NULL) && (virtual_indexes != (const IndexPacket *) NULL)) { (void) memcpy(indexes,virtual_indexes,(size_t) length* sizeof(*virtual_indexes)); indexes+=length; } } } virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. 
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const PixelPacket *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  /* Delegate to the per-thread nexus so concurrent readers do not collide. */
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the
%  last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Honor an installed cache-method override before the default path. */
  if (cache_info->methods.get_virtual_pixels_handler !=
       (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region. If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file. The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
%  the black color component or to obtain the colormap indexes (of type
%  IndexPacket) corresponding to the region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
%  safe.  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
%
%  The format of the GetVirtualPixels() method is:
%
%      const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Honor an installed cache-method override before the default path. */
  if (cache_info->methods.get_virtual_pixel_handler !=
       (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the last call
%  to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      PixelPacket *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s N e x u s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsNexus() returns the pixels associated with the specified
%  cache nexus.
%
%  The format of the GetVirtualPixelsNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  /* An undefined storage class means the cache holds no pixels yet. */
  if (cache_info->storage_class == UndefinedClass)
    return((PixelPacket *) NULL);
  return((const PixelPacket *) nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a s k P i x e l C a c h e N e x u s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
%  The method returns MagickTrue if the pixel region is masked, otherwise
%  MagickFalse.
% % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static inline void MagickPixelCompositeMask(const MagickPixelPacket *p, const MagickRealType alpha,const MagickPixelPacket *q, const MagickRealType beta,MagickPixelPacket *composite) { double gamma; if (alpha == TransparentOpacity) { *composite=(*q); return; } gamma=1.0-QuantumScale*QuantumScale*alpha*beta; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta); composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta); composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta); if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace)) composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *restrict cache_info; MagickPixelPacket alpha, beta; MagickSizeType number_pixels; NexusInfo **restrict clip_nexus, **restrict image_nexus; register const PixelPacket *restrict r; register IndexPacket *restrict nexus_indexes, *restrict indexes; register PixelPacket *restrict p, *restrict q; register ssize_t i; /* Apply clip mask. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); image_nexus=AcquirePixelCacheNexus(1); clip_nexus=AcquirePixelCacheNexus(1); if ((image_nexus == (NexusInfo **) NULL) || (clip_nexus == (NexusInfo **) NULL)) ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x, nexus_info->region.y,nexus_info->region.width,nexus_info->region.height, image_nexus[0],exception); indexes=image_nexus[0]->indexes; q=nexus_info->pixels; nexus_indexes=nexus_info->indexes; r=GetVirtualPixelsFromNexus(image->mask,MaskVirtualPixelMethod, nexus_info->region.x,nexus_info->region.y,nexus_info->region.width, nexus_info->region.height,clip_nexus[0],&image->exception); GetMagickPixelPacket(image,&alpha); GetMagickPixelPacket(image,&beta); number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; for (i=0; i < (ssize_t) number_pixels; i++) { if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL)) break; SetMagickPixelPacket(image,p,indexes+i,&alpha); SetMagickPixelPacket(image,q,nexus_indexes+i,&beta); MagickPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha, alpha.opacity,&beta); SetPixelRed(q,ClampToQuantum(beta.red)); SetPixelGreen(q,ClampToQuantum(beta.green)); SetPixelBlue(q,ClampToQuantum(beta.blue)); SetPixelOpacity(q,ClampToQuantum(beta.opacity)); if (cache_info->active_index_channel != MagickFalse) SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i)); p++; q++; r++; } clip_nexus=DestroyPixelCacheNexus(clip_nexus,1); image_nexus=DestroyPixelCacheNexus(image_nexus,1); if (i < (ssize_t) number_pixels) return(MagickFalse); return(MagickTrue); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p e n P i x e l C a c h e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the
%  cache dimensions, allocating space for the image pixels and optionally the
%  colormap indexes, and memory mapping the cache if it is disk based.  The
%  cache nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline void AllocatePixelCachePixels(CacheInfo *cache_info)
{
  cache_info->mapped=MagickFalse;
  cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
    AcquireAlignedMemory(1,(size_t) cache_info->length));
  if (cache_info->pixels == (PixelPacket *) NULL)
    {
      /* Heap allocation failed: fall back to an anonymous memory map. */
      cache_info->mapped=MagickTrue;
      cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
        cache_info->length);
    }
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#if defined(SIGBUS)
/*
  Convert a SIGBUS (raised when a memory-mapped disk cache cannot be
  extended) into a fatal cache exception.
*/
static void CacheSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendPixelCache");
}
#endif

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if (cache_info->file != -1)
    return(MagickTrue);  /* cache already open */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to an existing file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  cache_info->file=file;
  cache_info->mode=mode;
  return(MagickTrue);
}

/*
  Write length bytes from buffer to the cache file at the given offset,
  retrying on EINTR; returns the number of bytes written (which may be
  short on error).
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

/*
  Grow the disk cache file to at least length bytes by writing a single byte
  at the last offset (creating a sparse file where supported).
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that do not round-trip through a signed offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);
  extent=(MagickOffsetType) length-1;
  count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (cache_info->synchronize != MagickFalse)
    {
      int
        status;

      status=posix_fallocate(cache_info->file,offset+1,extent-offset);
      if (status != 0)
        return(MagickFalse);
    }
#endif
#if defined(SIGBUS)
  (void) signal(SIGBUS,CacheSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}

static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info,
    source_info;

  char
    format[MaxTextExtent],
    message[MaxTextExtent];

  const char
    *type;

  MagickSizeType
    length,
    number_pixels;

  MagickStatusType
    status;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) GetImageIndexInList(image));
  cache_info->mode=mode;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; cache_info->pixels=(PixelPacket *) NULL; cache_info->indexes=(IndexPacket *) NULL; cache_info->length=0; return(MagickTrue); } number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=sizeof(PixelPacket); if (cache_info->active_index_channel != MagickFalse) packet_size+=sizeof(IndexPacket); length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if (cache_info->columns != columns) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; status=AcquireMagickResource(AreaResource,cache_info->length); length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket)); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) || (cache_info->type == MemoryCache)) { AllocatePixelCachePixels(cache_info); if (cache_info->pixels == (PixelPacket *) NULL) cache_info->pixels=source_info.pixels; else { /* Create memory pixel cache. 
*/ cache_info->colorspace=image->colorspace; cache_info->type=MemoryCache; cache_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) cache_info->indexes=(IndexPacket *) (cache_info->pixels+ number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status&=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename, cache_info->mapped != MagickFalse ? "Anonymous" : "Heap", type,(double) cache_info->columns,(double) cache_info->rows, format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; return(MagickTrue); } } RelinquishMagickResource(MemoryResource,cache_info->length); } /* Create pixel cache on disk. */ status=AcquireMagickResource(DiskResource,cache_info->length); if ((status == MagickFalse) || (cache_info->type == DistributedCache)) { DistributeCacheInfo *server_info; if (cache_info->type == DistributedCache) RelinquishMagickResource(DiskResource,cache_info->length); server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ cache_info->type=DistributedCache; cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MaxTextExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse, format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,GetDistributeCacheFile( (DistributeCacheInfo *) cache_info->server_info),type, (double) cache_info->columns,(double) cache_info->rows, format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(MagickTrue); } } RelinquishMagickResource(DiskResource,cache_info->length); (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { RelinquishMagickResource(DiskResource,cache_info->length); ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } cache_info->storage_class=image->storage_class; 
cache_info->colorspace=image->colorspace; length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket)); if (length != (MagickSizeType) ((size_t) length)) cache_info->type=DiskCache; else { status=AcquireMagickResource(MapResource,cache_info->length); if ((status == MagickFalse) && (cache_info->type != MapCache) && (cache_info->type != MemoryCache)) cache_info->type=DiskCache; else { cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (PixelPacket *) NULL) { cache_info->pixels=source_info.pixels; cache_info->type=DiskCache; } else { /* Create file-backed memory-mapped pixel cache. */ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->indexes=(IndexPacket *) NULL; if (cache_info->active_index_channel != MagickFalse) cache_info->indexes=(IndexPacket *) (cache_info->pixels+ number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s[%d], %s, %.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } return(MagickTrue); } } RelinquishMagickResource(MapResource,cache_info->length); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,format); 
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MaxTextExtent, "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero initializes the persistent pixel cache. % % o initialize: A value other than zero initializes the persistent pixel % cache. % % o offset: the offset in the persistent cache to store pixels. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info,
    *restrict clone_info;

  Image
    clone_image;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance the caller's offset to the next page-aligned slot */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
      (cache_info->reference_count == 1))
    {
      /* double-checked under the semaphore before usurping the cache file */
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->mode != ReadMode) &&
          (cache_info->type != MemoryCache) &&
          (cache_info->reference_count == 1))
        {
          int
            status;

          /*
            Usurp existing persistent pixel cache: the on-disk cache file is
            simply renamed to the requested persistent filename.
          */
          status=rename_utf8(cache_info->cache_filename,filename);
          if (status == 0)
            {
              (void) CopyMagickString(cache_info->cache_filename,filename,
                MaxTextExtent);
              *offset+=cache_info->length+page_size-(cache_info->length %
                page_size);
              UnlockSemaphoreInfo(cache_info->semaphore);
              cache_info=(CacheInfo *) ReferencePixelCache(cache_info);
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CacheEvent,GetMagickModule(),
                  "Usurp resident persistent cache");
              return(MagickTrue);
            }
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /*
    Clone persistent pixel cache: open a fresh disk cache at the requested
    filename/offset and copy the old repository into it.
  */
  clone_image=(*image);
  clone_info=(CacheInfo *) clone_image.cache;
  image->cache=ClonePixelCache(cache_info);
  cache_info=(CacheInfo *) ReferencePixelCache(image->cache);
  (void) CopyMagickString(cache_info->cache_filename,filename,MaxTextExtent);
  cache_info->type=DiskCache;
  cache_info->offset=(*offset);
  cache_info=(CacheInfo *) image->cache;
  status=OpenPixelCache(image,IOMode,exception);
  if (status != MagickFalse)
    status=ClonePixelCacheRepository(cache_info,clone_info,&image->exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates an region to store image pixels
%  as defined by the region rectangle and returns a pointer to the region.
%  This region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Thin forwarding wrapper: identical contract to
   QueueAuthenticPixelCacheNexus(). */
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
    exception));
}

MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  PixelPacket
    *restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickSignature);
  /* the anchor (x,y) must lie inside the cache; a queued region, unlike a
     virtual one, cannot start outside the image */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* offset of the region's last pixel: the whole region must fit */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l s C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates an region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  /* per-thread nexus: each OpenMP thread gets its own staging area */
  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e A u t h e n t i c P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a PixelPacket array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or the storage class is
%  PseudoClass, call GetAuthenticIndexQueue() after invoking
%  GetAuthenticPixels() to obtain the black color component or the colormap
%  indexes (of type IndexPacket) corresponding to the region.  Once the
%  PixelPacket (and/or IndexPacket) array has been updated, the changes must
%  be saved back to the underlying image using SyncAuthenticPixels() or they
%  may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns, rows,exception)); assert(id < (int) cache_info->number_threads); return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheIndexes() reads colormap indexes from the specified region of % the pixel cache. % % The format of the ReadPixelCacheIndexes() method is: % % MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the colormap indexes. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  ReadPixelCacheRegion() reads `length' bytes at file position `offset' into
  `buffer', retrying on EINTR and partial reads.  Returns the number of
  bytes actually read (which the callers compare against `length'), or -1
  when the initial lseek fails on platforms without pread().
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        /* retry only when the syscall was interrupted; EOF/error ends loop */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

static MagickBooleanType ReadPixelCacheIndexes(CacheInfo *restrict cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register IndexPacket
    *restrict q;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  /* nexus points straight into the cache -- nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *restrict p;

      /*
        Read indexes from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse the copy into one memcpy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* on disk the index plane follows the full pixel plane */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if ((MagickSizeType) count < length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read indexes from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* an early break above leaves y < rows and signals a short read */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double)
      nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(CacheInfo *restrict cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register PixelPacket
    *restrict q;

  register ssize_t
    y;

  size_t
    rows;

  /* nexus points straight into the cache -- nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse the copy into one memcpy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if ((MagickSizeType) count < length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* an early break above leaves y < rows and signals a short read */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  /* bump the count under the cache semaphore for thread safety */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each handler is installed only when the
    caller supplied a non-NULL replacement.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  /*
    NOTE(review): this reads the CURRENT handler from cache_info->methods,
    unlike the authentic case below which reads the NEW handler from
    cache_methods -- so a NULL replacement can overwrite a non-NULL handler
    here.  Asymmetry looks suspicious; confirm against upstream intent
    before changing.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e N e x u s P i x e l s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      PixelPacket SetPixelCacheNexusPixels(const CacheInfo *cache_info,
%        const MapMode mode,const RectangleInfo *region,
%        const MagickBooleanType buffered,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o region: A pointer to the RectangleInfo structure that defines the
%      region of this particular cache nexus.
%
%    o buffered: pixels are buffered.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AcquireCacheNexusPixels() allocates the staging buffer for a cache nexus:
  first as aligned heap memory and, failing that, as an anonymous memory map
  via MapBlob().  On failure a ResourceLimitError is recorded in `exception'
  and MagickFalse is returned.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Reject lengths whose value does not survive a round-trip through size_t
    (i.e. would overflow the allocator's size argument).
  */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  nexus_info->mapped=MagickFalse;
  nexus_info->cache=(PixelPacket *) MagickAssumeAligned(AcquireAlignedMemory(1,
    (size_t) nexus_info->length));
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      /*
        Heap allocation failed; fall back to an anonymous memory map.
      */
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
  IsAuthenticPixelCache() returns MagickTrue when the nexus pixel pointer
  aliases the in-core cache pixels at the nexus region's offset (i.e. no
  staging buffer is involved).  A PingCache is always reported as authentic.
*/
static inline MagickBooleanType IsAuthenticPixelCache(
  const CacheInfo *restrict cache_info,const NexusInfo *restrict nexus_info)
{
  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  /*
    Does nexus pixels point directly to in-core cache pixels or is it
    buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  status=nexus_info->pixels == (cache_info->pixels+offset) ? MagickTrue :
    MagickFalse;
  return(status);
}

/*
  PrefetchPixelCacheNexusPixels() issues a read (ReadMode) or write prefetch
  hint for the nexus pixels.  NOTE(review): the magick_unreferenced() calls
  presumably silence unused-parameter warnings on builds where
  MagickCachePrefetch() expands to nothing -- confirm.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  magick_unreferenced(nexus_info);
  magick_unreferenced(mode);

  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}

static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  nexus_info->region=(*region);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      ssize_t
        x,
        y;

      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /*
        The nexus may alias the in-core pixels directly only when the region
        lies entirely inside the cache extent and is either a single row or a
        run of whole rows starting at column 0.
      */
      if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
           (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
           ((nexus_info->region.width == cache_info->columns) ||
            ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
    The staging buffer holds the pixels followed by the colormap indexes (when
    the index channel is active); it is grown, never shrunk, on reuse.
  */
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  length=number_pixels*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((PixelPacket *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((PixelPacket *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
*/

/*
  SetCacheAlphaChannel() sets the opacity of every pixel in the image to
  `opacity' and enables the matte channel.  It is used by
  SetPixelCacheVirtualMethod() below when switching to a virtual-pixel method
  that requires an alpha channel.  Returns MagickTrue on success.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *restrict cache_info;

  CacheView
    *restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* a failed row marks status; remaining iterations fall through */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /*
          A non-opaque background requires a matte channel; a non-gray
          background cannot be represented in a gray colorspace.
        */
        if ((image->background_color.opacity != OpaqueOpacity) &&
            (image->matte == MagickFalse))
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        if ((IsPixelGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace((Image *) image,sRGBColorspace);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* transparent virtual pixels require a matte channel */
        if (image->matte == MagickFalse)
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        break;
      }
      default:
        break;
    }
  return(method);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e   N e x u s           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* apply the clip and composite masks, if any, before writing back */
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  /* direct-mapped nexus: pixels were modified in place, nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SyncAuthenticPixelsCache() syncs the calling thread's cache nexus back to
  the in-memory or disk cache via SyncAuthenticPixelCacheNexus().
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  /* each OpenMP thread owns one nexus; sync the caller's slot */
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c   A u t h e n t i c   P i x e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* delegate to an installed sync handler, if one has been registered */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   I m a g e   P i x e l   C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() with clone==MagickTrue performs the actual sync */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   P i x e l   C a c h e   I n d e x e s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheIndexes() writes the colormap indexes to the specified
%  region of the pixel cache.
%
%  The format of the WritePixelCacheIndexes() method is:
%
%      MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the colormap indexes.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const IndexPacket
    *restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  /* direct-mapped nexus: indexes were written in place already */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *restrict q;

      /*
        Write indexes to memory.  When the region spans full rows the whole
        transfer collapses to a single memcpy (rows=1, length=extent).
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.  On disk the index plane is stored after the
        full pixel plane, hence the cache_info->columns*rows*sizeof(PixelPacket)
        base added to each file offset below.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if ((MagickSizeType) count < length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache, one row at a time unless the
        region spans full rows and fits in one buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* any early break above leaves y short of rows => a write failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   C a c h e   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const PixelPacket
    *restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* direct-mapped nexus: pixels were written in place already */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *restrict q;

      /*
        Write pixels to memory.  Full-row regions collapse to one memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if ((MagickSizeType) count < length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* any early break above leaves y short of rows => a write failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseVector.h> #include <blaze/math/expressions/SparseVector.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/typetraits/IsDenseVector.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Subvector.h> #include <blaze/system/MacroDisable.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be assigned. // \param op The (compound) assignment operation. 
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2      // Transpose flag of the right-hand side dense vector
        , typename OP > // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_t<VT1>;
   using ET2 = ElementType_t<VT2>;

   constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<VT1> >::size );

   const bool lhsAligned( (~lhs).isAligned() );
   const bool rhsAligned( (~rhs).isAligned() );

   const int threads( omp_get_num_threads() );
   // One extra element per chunk when the vector size does not divide evenly
   const size_t addon( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare( (~lhs).size() / threads + addon );
   // Round each chunk up to a SIMD-width multiple so chunk boundaries stay
   // vector-aligned (assumes SIMDSIZE is a power of two -- true for the
   // bit-mask trick below to be valid)
   const size_t rest( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   // Executed inside an enclosing 'omp parallel'; each iteration is one chunk
   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // SIMD rounding can push trailing chunks past the end of the vector
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch on the runtime alignment of both operands
      if( simdEnabled && lhsAligned && rhsAligned ) {
         auto       target( subvector<aligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && lhsAligned ) {
         auto       target( subvector<aligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && rhsAligned ) {
         auto       target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else {
         auto       target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense
//        vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side sparse vector
        , bool TF2      // Transpose flag of the right-hand side sparse vector
        , typename OP > // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   const int threads( omp_get_num_threads() );
   // One extra element per chunk when the vector size does not divide evenly
   const size_t addon( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t sizePerThread( (~lhs).size() / threads + addon );

   // No SIMD chunk rounding here: sparse sources are assigned element-wise
   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );
      auto       target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
      const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  PLAIN ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // Serial fallback: at least one operand cannot be assigned in parallel
   assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function performs the OpenMP-based SMP assignment to a dense vector. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Fall back to serial assignment inside a serial section or when the
      // right-hand side expression itself vetoes parallel evaluation
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         assign( ~lhs, ~rhs );
      }
      else {
         // Spawn the team here; openmpAssign() contains the worksharing loop
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ assign( a, b ); } );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  ADDITION ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense vector. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ addAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. Due to the explicit application of the SFINAE principle, this function can // only be selected by the compiler in case both operands are SMP-assignable and the element // types of both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment to a dense vector. 
Due // to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ subAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. 
// \return void // // This function implements the OpenMP-based SMP multiplication assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { multAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ multAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); divAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. 
// \return void // // This function implements the OpenMP-based SMP division assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { divAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ divAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINTS // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
GB_binop__rdiv_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rdiv_int8)
// A.*B function (eWiseMult):       GB (_AemultB_01__rdiv_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__rdiv_int8)
// A.*B function (eWiseMult):       GB (_AemultB_03__rdiv_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rdiv_int8)
// A*D function (colscale):         GB (_AxD__rdiv_int8)
// D*A function (rowscale):         GB (_DxB__rdiv_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__rdiv_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__rdiv_int8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rdiv_int8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rdiv_int8)
// C=scalar+B                       GB (_bind1st__rdiv_int8)
// C=scalar+B'                      GB (_bind1st_tran__rdiv_int8)
// C=A+scalar                       GB (_bind2nd__rdiv_int8)
// C=A'+scalar                      GB (_bind2nd_tran__rdiv_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t

// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 8)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator.  RDIV is "reverse division": z = y / x, i.e. the operands
// of signed integer division are flipped, hence GB_IDIV_SIGNED (y, x, 8).
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_SIGNED (y, x, 8) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_INT8 || GxB_NO_RDIV_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): unlike every other kernel below, this one has no GB_DISABLE
// guard and returns void — presumably the caller only invokes it when the
// operator is enabled; verify against the Generator template.
void GB (_Cdense_ewise3_accum__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): dead code — the inner block above always returns first.
    // Harmless, and left as generated; fix belongs in Generator/*, not here.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rdiv_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rdiv_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rdiv_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        // rdiv with x bound first: z = rdiv (x, bij) = bij / x
        Cx [p] = GB_IDIV_SIGNED (bij, x, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rdiv_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        // rdiv with y bound second: z = rdiv (aij, y) = y / aij
        Cx [p] = GB_IDIV_SIGNED (y, aij, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IDIV_SIGNED (aij, x, 8) ;      \
}

GrB_Info GB (_bind1st_tran__rdiv_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use of the macro
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IDIV_SIGNED (y, aij, 8) ;      \
}

GrB_Info GB (_bind2nd_tran__rdiv_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_binop__isne_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__isne_int8
// A.*B function (eWiseMult):       GB_AemultB__isne_int8
// A*D function (colscale):         GB_AxD__isne_int8
// D*A function (rowscale):         GB_DxB__isne_int8
// C+=B function (dense accum):     GB_Cdense_accumB__isne_int8
// C+=b function (dense accum):     GB_Cdense_accumb__isne_int8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__isne_int8
// C=scalar+B                       GB_bind1st__isne_int8
// C=scalar+B'                      GB_bind1st_tran__isne_int8
// C=A+scalar                       GB_bind2nd__isne_int8
// C=A'+scalar                      GB_bind2nd_tran__isne_int8

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij != bij)

// These macros parameterize the shared *_template.c bodies included below;
// every kernel in this file is the same template specialized for ISNE/int8.

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (ISNE: "is not equal", result stored in the int8 type)
#define GB_BINOP(z, x, y, i, j) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_INT8 || GxB_NO_ISNE_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISNE is not in that set, so this kernel is not generated for this operator.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isne_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isne_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // slicing of B across tasks, precomputed by the caller:
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isne_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isne_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isne_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the per-matrix ek_slice workspaces allocated by the template
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                          \
{                                                                            \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;       \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;       \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;       \
}

GrB_Info GB_AaddB__isne_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers, allocated (if needed) inside GB_add_template.c
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isne_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isne_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B (may be NULL)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isne_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A (may be NULL)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int8_t aij = Ax [pA] ;          \
    Cx [pC] = (x != aij) ;          \
}

GrB_Info GB_bind1st_tran__isne_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here since A and B share int8_t)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int8_t aij = Ax [pA] ;          \
    Cx [pC] = (aij != y) ;          \
}

GrB_Info GB_bind2nd_tran__isne_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__minus_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__minus_int16
// A.*B function (eWiseMult):       GB_AemultB__minus_int16
// A*D function (colscale):         GB_AxD__minus_int16
// D*A function (rowscale):         GB_DxB__minus_int16
// C+=B function (dense accum):     GB_Cdense_accumB__minus_int16
// C+=b function (dense accum):     GB_Cdense_accumb__minus_int16
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__minus_int16
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__minus_int16
// C=scalar+B                       GB_bind1st__minus_int16
// C=scalar+B'                      GB_bind1st_tran__minus_int16
// C=A+scalar                       GB_bind2nd__minus_int16
// C=A'+scalar                      GB_bind2nd_tran__minus_int16

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij - bij)

// These macros parameterize the shared *_template.c bodies included below;
// every kernel in this file is the same template specialized for MINUS/int16.

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (MINUS: integer subtraction, wraps on overflow)
#define GB_BINOP(z, x, y, i, j) \
    z = (x - y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// MINUS qualifies, so this kernel is generated for this operator.
void GB_Cdense_ewise3_accum__minus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__minus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__minus_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // slicing of B across tasks, precomputed by the caller:
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__minus_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__minus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__minus_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the per-matrix ek_slice workspaces allocated by the template
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                          \
{                                                                            \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;       \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;       \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;       \
}

GrB_Info GB_AaddB__minus_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers, allocated (if needed) inside GB_add_template.c
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__minus_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__minus_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B (may be NULL)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__minus_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A (may be NULL)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (x - aij) ;           \
}

GrB_Info GB_bind1st_tran__minus_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here since A and B share int16_t)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (aij - y) ;           \
}

GrB_Info GB_bind2nd_tran__minus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
IO.h
// This code is part of the project "Ligra: A Lightweight Graph Processing // Framework for Shared Memory", presented at Principles and Practice of // Parallel Programming, 2013. // Copyright (c) 2013 Julian Shun and Guy Blelloch // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights (to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#include <iostream> #include <fstream> #include <stdlib.h> #include <cmath> #include <sys/mman.h> #include <stdio.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <parallel/algorithm> #include <omp.h> #include <cassert> #include "parallel.h" #include "blockRadixSort.h" #include "quickSort.h" #include "utils.h" #include "graph.h" #include "pvector.h" #include "timer.h" #include "sliding_queue.h" #include "dbg.h" using namespace std; typedef pair<uintE,uintE> intPair; typedef pair<uintE, pair<uintE,intE> > intTriple; template <class E> struct pairFirstCmp { bool operator() (pair<uintE,E> a, pair<uintE,E> b) { return a.first < b.first; } }; template <class E> struct getFirst {uintE operator() (pair<uintE,E> a) {return a.first;} }; template <class IntType> struct pairBothCmp { bool operator() (pair<uintE,IntType> a, pair<uintE,IntType> b) { if (a.first != b.first) return a.first < b.first; return a.second < b.second; } }; // A structure that keeps a sequence of strings all allocated from // the same block of memory struct words { long n; // total number of characters char* Chars; // array storing all strings long m; // number of substrings char** Strings; // pointers to strings (all should be null terminated) words() {} words(char* C, long nn, char** S, long mm) : Chars(C), n(nn), Strings(S), m(mm) {} void del() {free(Chars); free(Strings);} }; inline bool isSpace(char c) { switch (c) { case '\r': case '\t': case '\n': case 0: case ' ' : return true; default : return false; } } _seq<char> mmapStringFromFile(const char *filename) { struct stat sb; int fd = open(filename, O_RDONLY); if (fd == -1) { perror("open"); exit(-1); } if (fstat(fd, &sb) == -1) { perror("fstat"); exit(-1); } if (!S_ISREG (sb.st_mode)) { perror("not a file\n"); exit(-1); } char *p = static_cast<char*>(mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0)); if (p == MAP_FAILED) { perror("mmap"); exit(-1); } if (close(fd) == -1) { perror("close"); exit(-1); 
} size_t n = sb.st_size; // char *bytes = newA(char, n); // parallel_for(size_t i=0; i<n; i++) { // bytes[i] = p[i]; // } // if (munmap(p, sb.st_size) == -1) { // perror("munmap"); // exit(-1); // } // cout << "mmapped" << endl; // free(bytes); // exit(0); return _seq<char>(p, n); } _seq<char> readStringFromFile(char *fileName) { ifstream file (fileName, ios::in | ios::binary | ios::ate); if (!file.is_open()) { std::cout << "Unable to open file: " << fileName << std::endl; abort(); } long end = file.tellg(); file.seekg (0, ios::beg); long n = end - file.tellg(); char* bytes = newA(char,n+1); assert(bytes != NULL && "Malloc failure\n"); file.read (bytes,n); file.close(); return _seq<char>(bytes,n); } // parallel code for converting a string to words words stringToWords(char *Str, long n) { {parallel_for (long i=0; i < n; i++) if (isSpace(Str[i])) Str[i] = 0; } // mark start of words bool *FL = newA(bool,n); assert(FL != NULL && "Malloc failure\n"); FL[0] = Str[0]; {parallel_for (long i=1; i < n; i++) FL[i] = Str[i] && !Str[i-1];} // offset for each start of word _seq<long> Off = sequence::packIndex<long>(FL, n); free(FL); long m = Off.n; long *offsets = Off.A; // pointer to each start of word char **SA = newA(char*, m); assert(SA != NULL && "Malloc failure\n"); {parallel_for (long j=0; j < m; j++) SA[j] = Str+offsets[j];} free(offsets); return words(Str,n,SA,m); } template <class vertex> graph<vertex> readGraphFromFile(char* fname, bool isSymmetric, bool mmap) { #ifndef VGR words W; if (mmap) { _seq<char> S = mmapStringFromFile(fname); char *bytes = newA(char, S.n); assert(bytes != NULL && "Malloc failure\n"); // Cannot mutate the graph unless we copy. 
parallel_for(size_t i=0; i<S.n; i++) { bytes[i] = S.A[i]; } if (munmap(S.A, S.n) == -1) { perror("munmap"); exit(-1); } S.A = bytes; W = stringToWords(S.A, S.n); } else { _seq<char> S = readStringFromFile(fname); W = stringToWords(S.A, S.n); } #ifndef WEIGHTED if (W.Strings[0] != (string) "AdjacencyGraph") { #else if (W.Strings[0] != (string) "WeightedAdjacencyGraph") { #endif cout << "Bad input file" << endl; abort(); } long len = W.m -1; long n = atol(W.Strings[1]); long m = atol(W.Strings[2]); #ifndef WEIGHTED if (len != n + m + 2) { #else if (len != n + 2*m + 2) { #endif cout << "Bad input file" << endl; abort(); } uintT* offsets = newA(uintT,n); assert(offsets != NULL && "Malloc failure\n"); #ifndef WEIGHTED uintE* edges = newA(uintE,m); #else intE* edges = newA(intE,2*m); #endif assert(edges != NULL && "Malloc failure\n"); {parallel_for(long i=0; i < n; i++) offsets[i] = atol(W.Strings[i + 3]);} {parallel_for(long i=0; i<m; i++) { #ifndef WEIGHTED edges[i] = atol(W.Strings[i+n+3]); #else edges[2*i] = atol(W.Strings[i+n+3]); edges[2*i+1] = atol(W.Strings[i+n+m+3]); #endif }} //W.del(); // to deal with performance bug in malloc W.del(); //The original code ^ commented this out #else // #ifdef VGR // Added by Priyank if ( sizeof(uintT) != sizeof(unsigned long long) ) { std::cout << sizeof(uintT) << " " << sizeof(unsigned long long) << std::endl; abort(); } if ( sizeof(uintE) != sizeof(unsigned int) ) { std::cout << sizeof(uintE) << " " << sizeof(unsigned int) << std::endl; abort(); } ifstream ifs(fname, std::ios::binary); if ( !ifs.good() ) { std::cout << "Unable to open file: " << fname << std::endl; abort(); } unsigned long long major_version; unsigned long long minor_version; unsigned long long n1; unsigned long long m1; ifs.read((char*)&major_version, sizeof(unsigned long long)); ifs.read((char*)&minor_version, sizeof(unsigned long long)); ifs.read((char*)&n1, sizeof(unsigned long long)); ifs.read((char*)&m1, sizeof(unsigned long long)); long n,m; m = m1; n 
= n1; #ifndef WEIGHTED if ( ( major_version != 1 ) || ( minor_version != 0 ) ) { std::cout << "major: " << major_version << " minor: " << minor_version << " n: " << n << " m: " << m << std::endl; abort(); } #else if ( ( major_version != 1 ) || ( minor_version != 4 ) ) { std::cout << "major: " << major_version << " minor: " << minor_version << " n: " << n << " m: " << m << std::endl; abort(); } #endif uintT* offsets = newA(uintT,n); assert(offsets != NULL && "Malloc failure\n"); ifs.read((char*)&offsets[1], sizeof(unsigned long long) * (n-1)); if ( !ifs || (ifs.gcount() != (sizeof(unsigned long long) * (n-1))) ) { std::cout << "Error in reading offsets." << std::endl; abort(); } unsigned long long temp; ifs.read((char*)&temp, sizeof(unsigned long long) * (1)); offsets[0] = 0; #ifndef WEIGHTED uintE* edges = newA(uintE,m); #else intE* edges_ = newA(intE,2*m); assert(edges_ != NULL && "Malloc failure\n"); intE* edges = newA(intE,2*m); #endif assert(edges != NULL && "Malloc failure\n"); #ifndef WEIGHTED ifs.read((char*)edges, sizeof(unsigned int) * m); if ( !ifs || (ifs.gcount() != (sizeof(unsigned int) * m)) ) { std::cout << "Error in reading edges." << std::endl; abort(); } #else ifs.read((char*)&edges_[0], sizeof(unsigned int) * m); if ( !ifs || (ifs.gcount() != (sizeof(unsigned int) * m)) ) { std::cout << "Error in reading edges." << std::endl; abort(); } if ( (m % 2) == 1 ) { unsigned int x = 0; ifs.read((char*)&x, sizeof(unsigned int)); //std::cout << "extra element: " << x << std::endl; assert(x == 0); } ifs.read((char*)&edges_[m], sizeof(unsigned int) * m); if ( !ifs || (ifs.gcount() != (sizeof(unsigned int) * m)) ) { std::cout << "Error in reading edges." 
<< std::endl; abort(); } //std::cout << edges_[m] << std::endl; {parallel_for(long i=0; i<m; i++) { edges[2*i] = edges_[i]; edges[2*i+1] = edges_[i+m]; }} free(edges_); #endif ifs.close(); #endif // VGR vertex* v = newA(vertex,n); assert(v != NULL && "Malloc failure\n"); {parallel_for (uintT i=0; i < n; i++) { uintT o = offsets[i]; uintT l = ((i == n-1) ? m : offsets[i+1])-offsets[i]; v[i].setOutDegree(l); #ifndef WEIGHTED v[i].setOutNeighbors(edges+o); #else v[i].setOutNeighbors(edges+2*o); #endif }} if(!isSymmetric) { uintT* tOffsets = newA(uintT,n); assert(tOffsets != NULL && "Malloc failure\n"); {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;} #ifndef WEIGHTED intPair* temp = newA(intPair,m); #else intTriple* temp = newA(intTriple,m); #endif assert(temp != NULL && "Malloc failure\n"); {parallel_for(long i=0;i<n;i++){ uintT o = offsets[i]; for(uintT j=0;j<v[i].getOutDegree();j++){ #ifndef WEIGHTED temp[o+j] = make_pair(v[i].getOutNeighbor(j),i); #else temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j))); #endif } }} free(offsets); #ifndef WEIGHTED #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<uintE>()); #else quickSort(temp,m,pairFirstCmp<uintE>()); #endif #else #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<intPair>()); #else quickSort(temp,m,pairFirstCmp<intPair>()); #endif #endif tOffsets[temp[0].first] = 0; #ifndef WEIGHTED uintE* inEdges = newA(uintE,m); inEdges[0] = temp[0].second; #else intE* inEdges = newA(intE,2*m); inEdges[0] = temp[0].second.first; inEdges[1] = temp[0].second.second; #endif assert(inEdges != NULL && "Malloc failure\n"); {parallel_for(long i=1;i<m;i++) { #ifndef WEIGHTED inEdges[i] = temp[i].second; #else inEdges[2*i] = temp[i].second.first; inEdges[2*i+1] = temp[i].second.second; #endif if(temp[i].first != temp[i-1].first) { tOffsets[temp[i].first] = i; } }} free(temp); //fill in offsets of degree 0 vertices by taking closest non-zero //offset to the right 
sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m); {parallel_for(long i=0;i<n;i++){ uintT o = tOffsets[i]; uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i]; v[i].setInDegree(l); #ifndef WEIGHTED v[i].setInNeighbors(inEdges+o); #else v[i].setInNeighbors(inEdges+2*o); #endif }} free(tOffsets); Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges); std::cout << "Read directed graph. Num Nodes = " << n << " and Num Edges = " << m << "\n"; return graph<vertex>(v,n,m,mem); } else { free(offsets); Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges); std::cout << "Read undirected graph. Num Nodes = " << n << " and Num Edges = " << m << "\n"; return graph<vertex>(v,n,m,mem); } } template <class vertex> graph<vertex> readGraphFromBinary(char* iFile, bool isSymmetric) { char* config = (char*) ".config"; char* adj = (char*) ".adj"; char* idx = (char*) ".idx"; char configFile[strlen(iFile)+strlen(config)+1]; char adjFile[strlen(iFile)+strlen(adj)+1]; char idxFile[strlen(iFile)+strlen(idx)+1]; *configFile = *adjFile = *idxFile = '\0'; strcat(configFile,iFile); strcat(adjFile,iFile); strcat(idxFile,iFile); strcat(configFile,config); strcat(adjFile,adj); strcat(idxFile,idx); ifstream in(configFile, ifstream::in); long n; in >> n; in.close(); ifstream in2(adjFile,ifstream::in | ios::binary); //stored as uints in2.seekg(0, ios::end); long size = in2.tellg(); in2.seekg(0); #ifdef WEIGHTED long m = size/(2*sizeof(uint)); #else long m = size/sizeof(uint); #endif char* s = (char *) malloc(size); in2.read(s,size); in2.close(); uintE* edges = (uintE*) s; ifstream in3(idxFile,ifstream::in | ios::binary); //stored as longs in3.seekg(0, ios::end); size = in3.tellg(); in3.seekg(0); if(n != size/sizeof(intT)) { cout << "File size wrong\n"; abort(); } char* t = (char *) malloc(size); in3.read(t,size); in3.close(); uintT* offsets = (uintT*) t; vertex* v = newA(vertex,n); #ifdef WEIGHTED intE* edgesAndWeights = newA(intE,2*m); 
{parallel_for(long i=0;i<m;i++) { edgesAndWeights[2*i] = edges[i]; edgesAndWeights[2*i+1] = edges[i+m]; }} //free(edges); #endif {parallel_for(long i=0;i<n;i++) { uintT o = offsets[i]; uintT l = ((i==n-1) ? m : offsets[i+1])-offsets[i]; v[i].setOutDegree(l); #ifndef WEIGHTED v[i].setOutNeighbors((uintE*)edges+o); #else v[i].setOutNeighbors(edgesAndWeights+2*o); #endif }} if(!isSymmetric) { uintT* tOffsets = newA(uintT,n); {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;} #ifndef WEIGHTED intPair* temp = newA(intPair,m); #else intTriple* temp = newA(intTriple,m); #endif {parallel_for(intT i=0;i<n;i++){ uintT o = offsets[i]; for(uintT j=0;j<v[i].getOutDegree();j++){ #ifndef WEIGHTED temp[o+j] = make_pair(v[i].getOutNeighbor(j),i); #else temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j))); #endif } }} free(offsets); #ifndef WEIGHTED #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<uintE>()); #else quickSort(temp,m,pairFirstCmp<uintE>()); #endif #else #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<intPair>()); #else quickSort(temp,m,pairFirstCmp<intPair>()); #endif #endif tOffsets[temp[0].first] = 0; #ifndef WEIGHTED uintE* inEdges = newA(uintE,m); inEdges[0] = temp[0].second; #else intE* inEdges = newA(intE,2*m); inEdges[0] = temp[0].second.first; inEdges[1] = temp[0].second.second; #endif {parallel_for(long i=1;i<m;i++) { #ifndef WEIGHTED inEdges[i] = temp[i].second; #else inEdges[2*i] = temp[i].second.first; inEdges[2*i+1] = temp[i].second.second; #endif if(temp[i].first != temp[i-1].first) { tOffsets[temp[i].first] = i; } }} free(temp); //fill in offsets of degree 0 vertices by taking closest non-zero //offset to the right sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m); {parallel_for(long i=0;i<n;i++){ uintT o = tOffsets[i]; uintT l = ((i == n-1) ? 
m : tOffsets[i+1])-tOffsets[i]; v[i].setInDegree(l); #ifndef WEIGHTED v[i].setInNeighbors((uintE*)inEdges+o); #else v[i].setInNeighbors((intE*)(inEdges+2*o)); #endif }} free(tOffsets); #ifndef WEIGHTED Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges); return graph<vertex>(v,n,m,mem); #else Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights,inEdges); return graph<vertex>(v,n,m,mem); #endif } free(offsets); #ifndef WEIGHTED Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges); return graph<vertex>(v,n,m,mem); #else Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights); return graph<vertex>(v,n,m,mem); #endif } template <class vertex> graph<vertex> readGraph(char* iFile, bool compressed, bool symmetric, bool binary, bool mmap) { if(binary) return readGraphFromBinary<vertex>(iFile,symmetric); else return readGraphFromFile<vertex>(iFile,symmetric,mmap); } template <class vertex> graph<vertex> readCompressedGraph(char* fname, bool isSymmetric, bool mmap) { char* s; if (mmap) { _seq<char> S = mmapStringFromFile(fname); // Cannot mutate graph unless we copy. 
char *bytes = newA(char, S.n); parallel_for(size_t i=0; i<S.n; i++) { bytes[i] = S.A[i]; } if (munmap(S.A, S.n) == -1) { perror("munmap"); exit(-1); } s = bytes; } else { ifstream in(fname,ifstream::in |ios::binary); in.seekg(0,ios::end); long size = in.tellg(); in.seekg(0); cout << "size = " << size << endl; s = (char*) malloc(size); in.read(s,size); in.close(); } long* sizes = (long*) s; long n = sizes[0], m = sizes[1], totalSpace = sizes[2]; cout << "n = "<<n<<" m = "<<m<<" totalSpace = "<<totalSpace<<endl; cout << "reading file..."<<endl; uintT* offsets = (uintT*) (s+3*sizeof(long)); long skip = 3*sizeof(long) + (n+1)*sizeof(intT); uintE* Degrees = (uintE*) (s+skip); skip+= n*sizeof(intE); uchar* edges = (uchar*)(s+skip); uintT* inOffsets; uchar* inEdges; uintE* inDegrees; if(!isSymmetric){ skip += totalSpace; uchar* inData = (uchar*)(s + skip); sizes = (long*) inData; long inTotalSpace = sizes[0]; cout << "inTotalSpace = "<<inTotalSpace<<endl; skip += sizeof(long); inOffsets = (uintT*) (s + skip); skip += (n+1)*sizeof(uintT); inDegrees = (uintE*)(s+skip); skip += n*sizeof(uintE); inEdges = (uchar*)(s + skip); } else { inOffsets = offsets; inEdges = edges; inDegrees = Degrees; } vertex *V = newA(vertex,n); parallel_for(long i=0;i<n;i++) { long o = offsets[i]; uintT d = Degrees[i]; V[i].setOutDegree(d); V[i].setOutNeighbors(edges+o); } if(sizeof(vertex) == sizeof(compressedAsymmetricVertex)){ parallel_for(long i=0;i<n;i++) { long o = inOffsets[i]; uintT d = inDegrees[i]; V[i].setInDegree(d); V[i].setInNeighbors(inEdges+o); } } cout << "creating graph..."<<endl; Compressed_Mem<vertex>* mem = new Compressed_Mem<vertex>(V, s); graph<vertex> G(V,n,m,mem); return G; } /* prefix sum used by the preprocess function defined below */ static pvector<uintT> ParallelPrefixSum (const pvector<uintT> &degrees) { const size_t block_size = 1<<20; const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; pvector<uintT> local_sums(num_blocks); #pragma omp parallel 
for for (size_t block=0; block < num_blocks; block++) { uintT lsum = 0; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) lsum += degrees[i]; local_sums[block] = lsum; } pvector<uintT> bulk_prefix(num_blocks+1); uintT total = 0; for (size_t block=0; block < num_blocks; block++) { bulk_prefix[block] = total; total += local_sums[block]; } bulk_prefix[num_blocks] = total; pvector<uintT> prefix(degrees.size() + 1); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { uintT local_total = bulk_prefix[block]; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) { prefix[i] = local_total; local_total += degrees[i]; } } prefix[degrees.size()] = bulk_prefix[num_blocks]; return prefix; } template <class vertex> void generateHubClusterMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) { Timer t; t.Start(); auto numVertices = GA.n; auto numEdges = GA.m; vertex *origG = GA.V; pvector<degree_nodeid_t> degree_id_pairs(numVertices); uintT avgDegree = numEdges / numVertices; uintT hubCount {0}; const int PADDING = 64 / sizeof(uintE); uintE* localOffsets = new uintE[omp_get_max_threads() * PADDING](); uintE partitionSz = numVertices / omp_get_max_threads(); #pragma omp parallel { int tid = omp_get_thread_num(); uintE startID = partitionSz * tid; uintE stopID = partitionSz * (tid + 1); if (tid == omp_get_max_threads() - 1) { stopID = numVertices; } for (uintE n = startID; n < stopID; ++n) { vertex vtx = origG[n]; if (useOutdeg) { if (vtx.getOutDegree() > avgDegree) { ++localOffsets[tid * PADDING]; new_ids[n] = 1; } } else { if (vtx.getInDegree() > avgDegree) { ++localOffsets[tid * PADDING]; new_ids[n] = 1; } } } } uintE sum {0}; for (int tid = 0; tid < omp_get_max_threads(); ++tid) { auto origCount = localOffsets[tid * PADDING]; localOffsets[tid * 
PADDING] = sum; sum += origCount; } /* Step II - assign a remap for the hub vertices first */ #pragma omp parallel { uintE localCtr {0}; int tid = omp_get_thread_num(); uintE startID = partitionSz * tid; uintE stopID = partitionSz * (tid + 1); if (tid == omp_get_max_threads() - 1) { stopID = numVertices; } for (uintE n = startID; n < stopID; ++n) { if (new_ids[n] != UINT_E_MAX) { new_ids[n] = localOffsets[tid * PADDING] + localCtr; ++localCtr; } } } delete[] localOffsets; /* Step III - assigning a remap for (easy) non hub vertices */ auto numHubs = sum; SlidingQueue<uintE> queue(numHubs); #pragma omp parallel { //assert(omp_get_max_threads() == 56); QueueBuffer<uintE> lqueue(queue, numHubs / omp_get_max_threads()); #pragma omp for for (uintE n = numHubs; n < numVertices; ++n) { if (new_ids[n] == UINT_E_MAX) { // This steps preserves the ordering of the original graph (as much as possible) new_ids[n] = n; } else { uintE remappedTo = new_ids[n]; if (new_ids[remappedTo] == UINT_E_MAX) { // safe to swap Ids because the original vertex is a non-hub new_ids[remappedTo] = n; } else { // Cannot swap ids because original vertex was a hub (swapping // would disturb sorted ordering of hubs - not allowed) lqueue.push_back(n); } } } lqueue.flush(); } queue.slide_window(); //the queue keeps a list of vertices where a simple swap of locations is not possible /* Step IV - assigning remaps for remaining non hubs */ uintE unassignedCtr {0}; auto q_iter = queue.begin(); #pragma omp parallel for for (uintE n = 0; n < numHubs; ++n) { if (new_ids[n] == UINT_E_MAX) { uintE u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1)); new_ids[n] = u; } } t.Stop(); t.PrintTime("HubCluster Map Time", t.Seconds()); } template <class vertex> void generateHubSortMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) { Timer t; t.Start(); auto numVertices = GA.n; auto numEdges = GA.m; vertex *origG = GA.V; pvector<degree_nodeid_t> 
degree_id_pairs(numVertices); uintT avgDegree = numEdges / numVertices; uintT hubCount {0}; /* STEP I - collect degrees of all vertices */ #pragma omp parallel for reduction(+ : hubCount) for (uintE v = 0; v < numVertices; ++v) { vertex vtx = origG[v]; if (useOutdeg) { degree_id_pairs[v] = std::make_pair(vtx.getOutDegree(), v); if (vtx.getOutDegree() > avgDegree) { ++hubCount; } } else { degree_id_pairs[v] = std::make_pair(vtx.getInDegree(), v); if (vtx.getInDegree() > avgDegree) { ++hubCount; } } } /* Step II - sort the degrees in parallel */ __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_nodeid_t>()); /* Step III - make a remap based on the sorted degree list [Only for hubs] */ #pragma omp parallel for for (uintE n = 0; n < hubCount; ++n) { new_ids[degree_id_pairs[n].second] = n; } //clearing space from degree pairs pvector<degree_nodeid_t>().swap(degree_id_pairs); /* Step IV - assigning a remap for (easy) non hub vertices */ auto numHubs = hubCount; SlidingQueue<uintE> queue(numHubs); #pragma omp parallel { QueueBuffer<uintE> lqueue(queue, numHubs / omp_get_max_threads()); #pragma omp for for (uintE n = numHubs; n < numVertices; ++n) { if (new_ids[n] == UINT_E_MAX) { // This steps preserves the ordering of the original graph (as much as possible) new_ids[n] = n; } else { uintE remappedTo = new_ids[n]; if (new_ids[remappedTo] == UINT_E_MAX) { // safe to swap Ids because the original vertex is a non-hub new_ids[remappedTo] = n; } else { // Cannot swap ids because original vertex was a hub (swapping // would disturb sorted ordering of hubs - not allowed) lqueue.push_back(n); } } } lqueue.flush(); } queue.slide_window(); //the queue keeps a list of vertices where a simple swap of locations is not possible /* Step V - assigning remaps for remaining non hubs */ uintE unassignedCtr {0}; auto q_iter = queue.begin(); #pragma omp parallel for for (uintE n = 0; n < numHubs; ++n) { if (new_ids[n] == UINT_E_MAX) { uintE u = *(q_iter 
+ __sync_fetch_and_add(&unassignedCtr, 1)); new_ids[n] = u; } } t.Stop(); t.PrintTime("HubSort Map Time", t.Seconds()); } /* Preprocess a graph based on outdegrees or indegrees PageRank-specific Optimizations for directed graphs - 1) We do not create a new outNeighbors list (because it pull-only) 2) We only create new out-degrees because PR uses it during computation */ template <class vertex> graph<vertex> preprocessGraph(graph<vertex> GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank = false, bool isDenseWrite = false, ReorderingAlgo reordering_algo = DBG) { Timer t; t.Start(); auto numVertices = GA.n; auto numEdges = GA.m; vertex *origG = GA.V; if (!isSym) { generateMapping(GA, reordering_algo, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite); /* Step VI - generate degree list for new graph */ pvector<uintT> degrees(numVertices); pvector<uintT> inv_degrees(numVertices); #pragma omp parallel for for (uintE v = 0; v < numVertices; ++v) { auto newID = new_ids[v]; if (useOutdeg) { vertex vtx = origG[v]; degrees[newID] = vtx.getOutDegree(); inv_degrees[newID] = vtx.getInDegree(); } else { vertex vtx = origG[v]; degrees[newID] = vtx.getInDegree(); inv_degrees[newID] = vtx.getOutDegree(); } } /* Step VII - make a new vertex list for the new graph */ pvector<uintT> offsets = ParallelPrefixSum(degrees); pvector<uintT> inv_offsets = ParallelPrefixSum(inv_degrees); //clearing space from degree lists pvector<uintT>().swap(degrees); pvector<uintT>().swap(inv_degrees); #ifndef WEIGHTED uintE* outEdges = newA(uintE, numEdges); uintE* inEdges = newA(uintE, numEdges); #else intE* outEdges = newA(intE, 2 * numEdges); intE* inEdges = newA(intE, 2 * numEdges); #endif vertex* newV = newA(vertex, numVertices); #pragma omp parallel for schedule (dynamic, 1024) for (uintE v = 0; v < numVertices; ++v) { /* note that vertex IDs u and v belong to the space of original vertex IDs */ if (!isPageRank) { //copy out-neighbors auto newID = new_ids[v]; 
newV[newID].setOutDegree(origG[v].getOutDegree()); #ifndef WEIGHTED if (useOutdeg) newV[newID].setOutNeighbors(outEdges + offsets[newID]); else newV[newID].setOutNeighbors(outEdges + inv_offsets[newID]); #else if (useOutdeg) newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]); else newV[newID].setOutNeighbors(outEdges + 2 * inv_offsets[newID]); #endif for (uintE u = 0; u < origG[v].getOutDegree(); ++u) { auto origNgh = origG[v].getOutNeighbor(u); newV[newID].setOutNeighbor(u, new_ids[origNgh]); #ifdef WEIGHTED newV[newID].setOutWeight(u, origG[v].getOutWeight(u)); #endif } if (!isDenseWrite) { /* for dense-write pushonly apps we dont need in-neighbors */ //copy in-neighbors newV[newID].setInDegree(origG[v].getInDegree()); #ifndef WEIGHTED if (useOutdeg) newV[newID].setInNeighbors(inEdges + inv_offsets[newID]); else newV[newID].setInNeighbors(inEdges + offsets[newID]); #else if (useOutdeg) newV[newID].setInNeighbors(inEdges + 2 * inv_offsets[newID]); else newV[newID].setInNeighbors(inEdges + 2 * offsets[newID]); #endif for (uintE u = 0; u < origG[v].getInDegree(); ++u) { auto origNgh = origG[v].getInNeighbor(u); newV[newID].setInNeighbor(u, new_ids[origNgh]); #ifdef WEIGHTED newV[newID].setInWeight(u, origG[v].getInWeight(u)); #endif } } } else { /* PageRank - no need to apply weighted conditionals */ //copy in-neighbors auto newID = new_ids[v]; newV[newID].setInDegree(origG[v].getInDegree()); if (useOutdeg) newV[newID].setInNeighbors(inEdges + inv_offsets[newID]); else newV[newID].setInNeighbors(inEdges + offsets[newID]); for (uintE u = 0; u < origG[v].getInDegree(); ++u) { auto origNgh = origG[v].getInNeighbor(u); newV[newID].setInNeighbor(u, new_ids[origNgh]); } //only set out-degrees newV[newID].setOutDegree(origG[v].getOutDegree()); } } /* Step V - make the new graph */ Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges,inEdges); t.Stop(); string s = ReorderingAlgoStr(reordering_algo) + " Total Map Time"; 
t.PrintTime(s.c_str(), t.Seconds()); return graph<vertex>(newV,numVertices,numEdges,mem); } else { /* undirected graph */ generateMapping(GA, reordering_algo, isSym, true, new_ids, isPageRank, isDenseWrite); /* Step VI - generate degree list for new graph */ pvector<uintT> degrees(numVertices); #pragma omp parallel for for (uintE v = 0; v < numVertices; ++v) { auto newID = new_ids[v]; vertex vtx = origG[v]; degrees[newID] = vtx.getOutDegree(); } /* Step VII - make a new vertex list for the new graph */ pvector<uintT> offsets = ParallelPrefixSum(degrees); //clearing space from degrees pvector<uintT>().swap(degrees); #ifndef WEIGHTED uintE* outEdges = newA(uintE, numEdges); #else intE* outEdges = newA(intE, 2 * numEdges); #endif vertex* newV = newA(vertex, numVertices); #pragma omp parallel for schedule (dynamic, 1024) for (uintE v = 0; v < numVertices; ++v) { /* note that vertex IDs u and v belong to the space of original vertex IDs */ //copy neighbors auto newID = new_ids[v]; newV[newID].setOutDegree(origG[v].getOutDegree()); #ifndef WEIGHTED newV[newID].setOutNeighbors(outEdges + offsets[newID]); #else newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]); #endif for (uintE u = 0; u < origG[v].getOutDegree(); ++u) { auto origNgh = origG[v].getOutNeighbor(u); newV[newID].setOutNeighbor(u, new_ids[origNgh]); #ifdef WEIGHTED newV[newID].setOutWeight(u, origG[v].getOutWeight(u)); #endif } } /* Step V - make the new graph */ Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges); t.Stop(); string s = ReorderingAlgoStr(reordering_algo) + " Total Map Time"; t.PrintTime(s.c_str(), t.Seconds()); return graph<vertex>(newV,numVertices,numEdges,mem); } }
MatrixToTensor.h
#ifndef _MatrixToTensor_H #define _MatrixToTensor_H inline void MatrixToTensor(Matrix<dnn_double>& X, tiny_dnn::tensor_t& T, int read_max = -1) { size_t rd_max = read_max < 0 ? X.m : std::min(read_max, X.m); for (int i = 0; i < rd_max; i++) { tiny_dnn::vec_t x; for (int j = 0; j < X.n; j++) { x.push_back(X(i, j)); } T.push_back(x); } } inline void TensorToMatrix(tiny_dnn::tensor_t& T, Matrix<dnn_double>& X) { X = Matrix<dnn_double>(T.size(), T[0].size()); for (int i = 0; i < T.size(); i++) { for (int j = 0; j < T[i].size(); j++) { X(i, j)= T[i][j]; } } } inline tiny_dnn::vec_t label2tensor(size_t lable, int class_max_num) { tiny_dnn::vec_t tmp(class_max_num, 0); if (lable < 0 || lable >= class_max_num) { return tmp; } tmp[lable] = 1; //printf("%d %d:", class_max_num, tmp.size()); //for (int i = 0; i < class_max_num; i++) //{ // printf(" %f", tmp[i]); //} //printf("\n"); return tmp; } tiny_dnn::tensor_t diff_vec(tiny_dnn::tensor_t& X, std::vector<int>& idx, int lag = 1) { tiny_dnn::tensor_t diff; const bool isidx = idx.size() > 0; diff.resize(X.size() - lag); for (int i = 0; i < X.size() - lag; i++) { for (int k = 0; k < X[0].size(); k++) { if (!isidx || isidx && !idx[k]) { float_t z = X[i + lag][k] - X[i][k]; diff[i].push_back(z); } else { diff[i].push_back(X[i + lag][k]); } } } return diff; } tiny_dnn::tensor_t diffinv_vec(tiny_dnn::tensor_t& base, tiny_dnn::tensor_t& X, std::vector<int>& idx, int lag = 1, bool logfnc = false) { tiny_dnn::tensor_t diffinv; diffinv.resize(X.size()); const bool isidx = idx.size() > 0; for (int i = 0; i < X.size(); i++) { diffinv[i].resize(X[0].size(), 0.0); } for (int i = 0; i < X.size(); i++) { for (int k = 0; k < X[0].size(); k++) { if (!isidx || isidx && !idx[k]) { if (i <= lag - 1) { if (logfnc) { diffinv[i][k] = log(base[i][k]); } else { diffinv[i][k] = base[i][k]; } } else { diffinv[i][k] = diffinv[i - lag][k] + X[i - lag][k]; } } else { if (i <= lag - 1) { diffinv[i][k] = base[i][k]; } else { diffinv[i][k] = X[i - lag][k]; } 
} } } return diffinv; } tiny_dnn::tensor_t log(tiny_dnn::tensor_t& X, std::vector<int>& idx) { tiny_dnn::tensor_t r = X; const bool isidx = idx.size() > 0; #pragma omp parallel for for (int i = 0; i < X.size(); i++) { for (int k = 0; k < X[0].size(); k++) { if (X[i][k] < 0) { printf("ERROR:-------- log ( 0 < x ) --------\n"); } if (!isidx || isidx && !idx[k]) r[i][k] = log(X[i][k]); else r[i][k] = X[i][k]; } } return r; } tiny_dnn::tensor_t exp(tiny_dnn::tensor_t& X, std::vector<int>& idx) { tiny_dnn::tensor_t r = X; const bool isidx = idx.size() > 0; #pragma omp parallel for for (int i = 0; i < X.size(); i++) { for (int k = 0; k < X[0].size(); k++) { if (!isidx || isidx && !idx[k]) r[i][k] = exp(X[i][k]); else r[i][k] = X[i][k]; } } return r; } #endif
matrix_s.h
// // matrix.cpp // Define Class for Vector & Matrix // // Created by Yoshi Miyazaki on 2015/04/11. // #include "matrix.h" /*---------------------------------------- Vector Types Constructers ---------------------------------------*/ template<class T> Vector1d<T>::Vector1d(){ n = 0; v = 0; } template<class T> Vector1d<T>::Vector1d(int nn){ n = nn; v = new T[n]; } template<class T> Vector1d<T>::Vector1d(const T& a, int nn){ n = nn; v = new T[nn]; for (int i=0; i<nn; i++){ v[i] = a; } } template<class T> Vector1d<T>::Vector1d(const T* a, int nn){ n = nn; v = new T[n]; for (int i=0; i<nn; i++){ v[i] = *a++; } } template<class T> Vector1d<T>::Vector1d(const Vector1d<T> &copy){ n = copy.n; v = new T[n]; for (int i=0; i<n; i++){ v[i] = copy[i]; } } /*---------------------------------------- Operater ---------------------------------------*/ template<class T> // Substitution Vector1d<T>& Vector1d<T>::operator=(const Vector1d<T> &copy){ if (this != &copy){ if (n != copy.n){ if (v != 0) delete[] v; n = copy.n; v = new T[n]; } for (int i=0; i<n; i++){ v[i] = copy[i]; } } return *this; } template<class T> // i'th element Vector1d<T>& Vector1d<T>::operator=(const T &a){ for (int i=0; i<n; i++){ v[i] = a; } return *this; } template<class T> const bool Vector1d<T>::operator==(const Vector1d<T>& rhs) const{ if (n != rhs.n){ return 0; } else{ bool b = 1; for (int i=0; i<n; i++){ if (v[i] != rhs[i]){ b = 0; break; } } return b; } } template<class T> void Vector1d<T>::resize(int nn){ if (n != nn){ if (v != 0){ delete[] v; } n = nn; v = new T[n]; } } template<class T> void Vector1d<T>::resize(const T& a, int nn){ if (n != nn){ if (v != 0){ delete[] v; } n = nn; v = new T[n]; } for (int i=0; i<n; i++){ v[i] = a; } } /*---------------------------------------- Mathematical Operater ---------------------------------------*/ template<class T> const T Vector1d<T>::norm() const{ T norm = 0; for (int i=0; i<n; i++){ norm += v[i]*v[i]; } return sqrt(norm); } template<class T> const T 
Vector1d<T>::maxv() const{ T maxv = v[0]; for (int i=1; i<n; i++){ if (maxv < v[i]){maxv = v[i];} } return maxv; } template<class T> const T Vector1d<T>::minv() const{ T minv = v[0]; for (int i=1; i<n; i++){ if (minv > v[i]){minv = v[i];} } return minv; } template<class T> const T Vector1d<T>::average() const{ T ave = 0; for (int i=0; i<n; i++){ ave += v[i]; } return ave/double(n); } template<class T> /* maximum of abs(v[i]) */ const T Vector1d<T>::absmaxv() const{ T maxv = abs(v[0]); for (int i=1; i<n; i++){ if (maxv < abs(v[i])){maxv = abs(v[i]);} } return maxv; } template<class T> /* minimum of abs(v[i]) */ const T Vector1d<T>::absminv() const{ T minv = abs(v[0]); for (int i=1; i<n; i++){ if (minv > abs(v[i])){minv = abs(v[i]);} } return minv; } template<class T> /* minimum of abs(v[i]) */ const T Vector1d<T>::absnon0minv() const{ T minv = 1e100; for (int i=0; i<n; i++){ if ((minv > abs(v[i])) && (v[i] != 0)){minv = abs(v[i]);} } return minv; } template<class T> /* average of abs(v[i]) */ const T Vector1d<T>::absaverage() const{ T ave = 0; for (int i=0; i<n; i++){ ave += (v[i]>0 ? v[i] : -1.0*v[i]); } return ave/double(n); } template<class T> /* dot product */ const T Vector1d<T>::operator*(const Vector1d<T>& A){ int nA; nA = A.size(); T dotp = 0; if (nA != n){ cout << "size of vectors don't match. Revise your input." << endl; exit(7); } else{ for (int i=0; i<n; i++){ dotp += v[i]*A[i]; } return dotp; } } template<class T> Vector1d<T> Vector1d<T>::operator+(const Vector1d<T>& A){ int nA; nA = A.size(); if (nA != n){ cout << "size of vectors don't match. Revise your input." << endl; exit(7); } else{ Vector1d<double> sum(n); for (int i=0; i<n; i++){ sum[i] = v[i] + A[i]; } return sum; } } template<class T> Vector1d<T> Vector1d<T>::operator-(const Vector1d<T>& A){ int nA; nA = A.size(); if (nA != n){ cout << "size of vectors don't match. Revise your input." 
<< endl; exit(7); } else{ Vector1d<double> sum(n); for (int i=0; i<n; i++){ sum[i] = v[i] - A[i]; } return sum; } } template<class T> Vector1d<T> Vector1d<T>::operator+(const T& A){ Vector1d<double> sum(n); for (int i=0; i<n; i++){ sum[i] = v[i] + A; } return sum; } template<class T> Vector1d<T> Vector1d<T>::operator-(const T& A){ Vector1d<double> sum(n); for (int i=0; i<n; i++){ sum[i] = v[i] - A; } return sum; } template<class T> Vector1d<T> Vector1d<T>::operator*(const T& A){ Vector1d<double> product(n); for (int i=0; i<n; i++){ product[i] = v[i] * A; } return product; } template<class T> Vector1d<T> Vector1d<T>::operator/(const T& A){ Vector1d<double> quotient(n); for (int i=0; i<n; i++){ quotient[i] = v[i] / A; } return quotient; } template<class T> Vector1d<T>& Vector1d<T>::operator+=(const Vector1d<T>& A){ int nA; nA = A.size(); if (nA != n){ cout << "size of vectors don't match. Revise your input." << endl; exit(7); } else{ for (int i=0; i<n; i++){ v[i] += A[i]; } return *this; } } template<class T> Vector1d<T>& Vector1d<T>::operator+=(const T& a){ for (int i=0; i<n; i++){ v[i] += a; } return *this; } template<class T> Vector1d<T>& Vector1d<T>::operator-=(const Vector1d<T>& A){ int nA; nA = A.size(); if (nA != n){ cout << "size of vectors don't match. Revise your input." 
<< endl; exit(7); } else{ for (int i=0; i<n; i++){ v[i] -= A[i]; } return *this; } } template<class T> Vector1d<T>& Vector1d<T>::operator-=(const T& a){ for (int i=0; i<n; i++){ v[i] -= a; } return *this; } template<class T> Vector1d<T>& Vector1d<T>::operator*=(const T& a){ for (int i=0; i<n; i++){ v[i] *= a; } return *this; } template<class T> Vector1d<T>& Vector1d<T>::operator/=(const T& a){ for (int i=0; i<n; i++){ v[i] /= a; } return *this; } /*---------------------------------------- Destructers ---------------------------------------*/ template<class T> Vector1d<T>::~Vector1d<T>(){ if (v != 0){ delete[] (v); } } /*---------------------------------------- Matrix Types Constructers ---------------------------------------*/ template<class T> Matrix<T>::Matrix(){ n = 0; m = 0; v = 0; } template<class T> Matrix<T>::Matrix(int nn, int mm){ n = nn; m = mm; v = new T*[n]; v[0] = new T[m*n]; for (int i=1; i<n; i++){ v[i] = v[i-1] + m; } } template<class T> Matrix<T>::Matrix(const T &a, int nn, int mm){ n = nn; m = mm; v = new T*[n]; v[0] = new T[m*n]; for (int i=1; i<n; i++){ v[i] = v[i-1] + m; } for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ v[i][j] = a; } } } template<class T> Matrix<T>::Matrix(const T *a, int nn, int mm){ n = nn; m = mm; v = new T*[n]; v[0] = new T[m*n]; for (int i=1; i<n; i++){ v[i] = v[i-1] + m; } for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ v[i][j] = *a++; } } } template<class T> Matrix<T>::Matrix(const Matrix &copy){ n = copy.n; m = copy.m; v = new T*[n]; v[0] = new T[m*n]; for (int i=1; i<n; i++){ v[i] = v[i-1] + m; } for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ v[i][j] = copy[i][j]; } } } /*---------------------------------------- Operater ---------------------------------------*/ template<class T> Matrix<T>& Matrix<T>:: operator=(const Matrix<T> &copy){ if (this != &copy){ if (n != copy.n || m != copy.m){ if (v != 0){ delete v[0]; delete v; } n = copy.n; m = copy.m; v = new T*[n]; v[0] = new T[n*m]; } for (int i=1; i<n; i++){ 
v[i] = v[i-1] + m; } for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ v[i][j] = copy[i][j]; } } } return *this; } template<class T> Matrix<T>& Matrix<T>:: operator=(const T &r){ for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ v[i][j] = r; } } return *this; } template<class T> void Matrix<T>::resize(int nn, int mm){ if (n != nn || m != mm){ if (v != 0){ delete v[0]; delete v; } n = nn; m = mm; v = new T*[n]; v[0] = new T[n*m]; } for (int i=1; i<n; i++){ v[i] = v[i-1] + m; } } template<class T> void Matrix<T>::resize(const T& a, int nn, int mm){ if (n != nn || m != mm){ if (v != 0){ delete v[0]; delete v; } n = nn; m = mm; v = new T*[n]; v[0] = new T[n*m]; } for (int i=1; i<n; i++){ v[i] = v[i-1] + m; } for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ v[i][j] = a; } } } /*---------------------------------------- Return row & column vector ---------------------------------------*/ template<class T> Vector1d<T> Matrix<T>::colvector(const int j){ Vector1d<T> rowv(n); for (int i=0; i<n; i++){ rowv[i] = v[i][j]; } return rowv; } template<class T> Vector1d<T> Matrix<T>::rowvector(const int i){ Vector1d<T> colv(m); for (int j=0; j<m; j++){ colv[j] = v[i][j]; } return colv; } /*---------------------------------------- Mathematical Operater ---------------------------------------*/ template<class T> Matrix<T> Matrix<T>::transpose(){ Matrix<T> tran(m,n); int i,j; for (i=0; i<n; i++){ for (j=0; j<m; j++){ tran[j][i] = v[i][j]; } } return tran; } template<class T> Matrix<T> Matrix<T>::lu_decomp(){ if (m != n){ cout << "unable to calculate the inverse" << endl; exit(25); } Matrix<T> lu(m,m); /* LU decomposition */ for (int i=0; i<m; i++){ /* calculate l_ij */ for (int j=i; j<m; j++){ lu[j][i] = v[j][i]; for (int k=0; k<i; k++){ lu[j][i] -= lu[k][i]*lu[j][k]; } } /* calculate u_ij */ for (int j=i+1; j<m; j++){ lu[i][j] = v[i][j]; for (int k=0; k<i; k++){ lu[i][j] -= lu[k][j]*lu[i][k]; } lu[i][j] /= lu[i][i]; } } return lu; } template<class T> void 
Matrix<T>::lu_linear(Vector1d<T>& A){ /* calculate solution */ for (int i=0; i<n; i++){ for (int k=0; k<i; k++){ A[i] -= v[i][k]*A[k]; } A[i] /= v[i][i]; } for (int i=n-1; i>=0; i--){ for (int k=i+1; k<n; k++){ A[i] -= v[i][k]*A[k]; } } } template<class T> Matrix<T> Matrix<T>::lu_inverse(){ /* matrix should already been LU decomposed */ if (m != n){ cout << "unable to calculate the inverse" << endl; exit(25); } /* prepare identiy matrix */ Matrix<T> inv(0.0,m,m); for (int i=0; i<m; i++){ inv[i][i] = 1.0; } /* calculate inverse */ for (int j=0; j<m; j++){ for (int i=0; i<n; i++){ for (int k=0; k<i; k++){ inv[i][j] -= v[i][k]*inv[k][j]; } inv[i][j] /= v[i][i]; } for (int i=n-1; i>=0; i--){ for (int k=i+1; k<n; k++){ inv[i][j] -= v[i][k]*inv[k][j]; } } } return inv; } template<class T> Matrix<T>& Matrix<T>::numeric0(double LIM){ /* find abs max value in matrix */ T absmaxv = 0.0; for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ if (abs(v[i][j]) > absmaxv) {absmaxv = abs(v[i][j]);} } } /* drop off all numeric error */ T eps = absmaxv*LIM*16; for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ if (abs(v[i][j]) < eps && v[i][j] != 0){ v[i][j] = 0; } } } return *this; } template<class T> Matrix<T>& Matrix<T>::operator+=(const Matrix<T>& B){ int nB = B.nrows(); int mB = B.mcols(); if ((nB != n) || (mB != m)){ cout << "size of matrixes don't match. Revise your input." << endl; exit(7); } else { for (int i=0; i<n; i++){ for (int j=0; j<m; j++){ v[i][j] += B[i][j]; } } return *this; } } template<class T> Matrix<T>& Matrix<T>::operator-=(const Matrix<T>& B){ int nB = B.nrows(); int mB = B.mcols(); if ((nB != n) || (mB != m)){ cout << "size of matrixes don't match. Revise your input." 
<< endl;
        exit(7);
    }
    else {
        for (int i=0; i<n; i++){
            for (int j=0; j<m; j++){
                v[i][j] -= B[i][j];
            }
        }
        return *this;
    }
}

// Matrix-vector product: returns (*this) * A.
// Exits the process (code 7) when A's length differs from the column count m.
template<class T>
Vector1d<T>
Matrix<T>::operator*(Vector1d<T> &A){
    int nA;
    nA = A.size();
    // cout << n << m << nB << mB << endl;
    if (nA != m){
        cout << "size of matrix & vector don't match. Revise your input. sizes: "
             << m << " & " << nA << endl;
        exit(7);
    }
    else{
        Vector1d<T> product(n);
        for (int i=0; i<n; i++){
            product[i] = 0;
            for (int k=0; k<m; k++){
                product[i] += v[i][k]*A[k];
            }
        }
        return product;
    }
}

// Matrix-matrix product: returns (*this) * B.
// Exits the process (code 7) when the inner dimensions disagree.
template<class T>
Matrix<T>
Matrix<T>::operator*(Matrix<T> &B){
    int nB, mB;
    nB = B.nrows();
    mB = B.mcols();
    // cout << n << m << nB << mB << endl;
    if (nB != m){
        cout << "size of 2 matricies don't match. Revise your matrix." << endl;
        exit(7);
    }
    else{
        Matrix<T> product(n,mB);
        int i,j,k;
        // Triple loop kept serial; OpenMP variant left disabled below.
        // int NUM_THREADS=omp_get_num_procs();
        // omp_set_num_threads(NUM_THREADS);
        // #pragma omp parallel for private(j,k)
        for (i=0; i<n; i++){
            for (j=0; j<mB; j++){
                product[i][j] = 0;
                for (k=0; k<m; k++){
                    product[i][j] += v[i][k]*B[k][j];
                }
            }
        }
        return product;
    }
}

/*----------------------------------------
  Destructors
  ---------------------------------------*/
// Frees the contiguous data block and the row-pointer array allocated by
// the constructors/resize.
template<class T>
Matrix<T>::~Matrix<T>(){
    if (v!=0){
        if (m!=0){
            delete[] v[0];
        }
        delete[] v;
    }
}
r2_maths.h
/* r2_maths - v0.0 - public domain vector, matrix and quaterion library no warranty implied; use at your own risk Built in the style of: https://github.com/nothings/stb This is written with game development in mind. Do this: #define R2_MATHS_IMPLEMENTATION before you include this file in *one* C or C++ file to create the implementation. // i.e. it should look like this: #include ... #include ... #include ... #define R2_MATHS_IMPLEMENTATION #include "r2_maths.h" Add: to cflags -funroll-loops -fopenmp LICENSE See end of file for license information. */ #ifndef R2_MATHS #define R2_MATHS #ifdef __cplusplus extern "C" { #endif #include <math.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> // If on, this uses custom mat3 and mat4 multiplication // code instead of the geneirc mat_mul function. In testing // this makes the code run consistently fast, whereas without // it you can get a sometimes faster multiply, but the speed // is inconsistent. #define R2_MAT_MUL_LUDICROUS_SPEED 1 #ifndef EPSILON #define EPSILON 0.000000954 #endif #ifndef M_PI #define M_PI 3.141592653589 #endif /** * A 2d vector backed by an array. This type * is only used by vec2. You can * access the value using ->a_vec2 or one of * the struct values ->x ->y */ typedef union u_vec2 { float a_vec2[2]; struct { float x, y; // -- 8 }; } vec2; /** * A vector backed by an array. This type * is used by vec4, quat, vec3 and color. You can * access the value using ->a_vec or one of * the struct values ->x ->y ->z ->w */ typedef union u_vec4 { float a_vec[4]; struct { float x; // 4 float y; // 4 -- 8 float z; // 4 float w; // 4 -- 16 }; } vec3, vec4, quat, color; /** * 3x3 Matrix backed by a flat array. * Access the array with ->a_mat3 or use the format: * m<row><col> * where row and column are zero based * * e.g. 
m3->m22 for the 3rd row, 3rd column value * m3->a_mat3 for the array with values in order */ typedef union u_mat3 { float a_mat3[9]; struct { // clang-format off float m00; float m10; float m20; // 12 float m01; float m11; float m21; // 12 float m02; float m12; float m22; // 12 -- 36 // clang-format on }; } mat3; /** * 4x4 Matrix backed by a flat array. * Access the array with ->a_mat4 or use the format: * m<row><col> * where row and column are zero based * * e.g. m4->m22 for the 3rd row, 3rd column value * m4->a_mat4 for the array with values in order */ typedef union u_mat4 { float a_mat4[16]; struct { // clang-format off float m00; float m10; float m20; float m30; // 16 float m01; float m11; float m21; float m31; // 16 float m02; float m12; float m22; float m32; // 16 float m03; float m13; float m23; float m33; // 16 -- 64 // clang-format on }; } mat4; /** * Returns true if a and b are within EPSILON * of each other */ bool r2_equals(float a, float b); float deg_to_rad(float d); /** * Generic Matrix Multiply * Raw multiply of any-size matrix against any-size (as long as the rows of the * first one is the same size as the column of the second one). * * You probably want to use mat4_mul or mat3_mul for 4x4 and 3x3 * * (NOTE: This function calls calloc) * * `out` must be the right size of the answer and must be initialized to 0 * * r1,c1 = rows and column size of m1 * r2,c2 = rows and column size of m2 */ void mat_mul(const float *m1, const float *m2, unsigned char r1, unsigned char c1, unsigned char r2, unsigned char c2, float *out); /** * Multiply two 4x4 matrix output to out * if R2_MAT_MUL_LUDICROUS_SPEED is on (the default) this will * do a specfic 4x4 multiply function. * * If R2_MAT_MUL_LUDICROUS_SPEED is off it will call the generic * multiply and use calloc. */ void mat4_mul(const mat4 *m1, const mat4 *m2, mat4 *out); void mat4_transform(const vec4 *p, const mat4 *mat, vec4 *out); /** * Fills an mat4 with an array. 
It expects an array of values * that are given in sets of 4s one *row* at a time. */ void mat4_set(const float *arry, mat4 *m); void mat4_identity(mat4 *m); /** Create a matrix for opengl perspective projection (set transpose to true) */ void mat4_perspective(float fov, float aspect, float z_near, float z_far, mat4 *out); /** * Useful for view matrix for opengl (set transpose to true) * target and up should be normalized */ void mat4_lookat(const vec4 *pos, const vec4 *target, const vec4 *up, mat4 *out); void mat4_transpose(const mat4 *m1, mat4 *m2); char *mat4_tos(const mat4 *m); /** * Multiply two 3x3 matrix output to out * if R2_MAT_MUL_LUDICROUS_SPEED is off, this will call * calloc (default is on). */ void mat3_mul(const mat3 *m1, const mat3 *m2, mat3 *out); void mat3_identity(mat3 *m); char *mat3_tos(const mat3 *m); // void quat_mat4(const quat *q, mat4 *out); void quat_mul_vec3(const quat *q, const vec3 *v, vec3 *out); void quat_normalize(const quat *q, quat *out); /** Conjigate a quaternion (make negative) */ void quat_conj(const quat *q, quat *out); void quat_mul_quat(const quat *q1, const quat *q2, quat *out); void quat_from_euler(const vec3 *r, quat *q); void quat_rot2q(const vec3 *axis, float theta, quat *out); float quat_magnitude(const quat *q); float quat_length(const quat *q); float quat_dot(const quat *q1, const quat *q2); void quat_sub(const quat *q1, const quat *q2, quat *out); void quat_add(const quat *q1, const quat *q2, quat *out); void quat_identity(quat *q); void quat_zero(quat *q); /** To string a quat (also see vec4_tos) - you need to free */ char *quat_tos(const quat *q); void vec4_normalize(const vec4 *v, vec4 *out); float vec4_dist(const vec4 *v1, const vec4 *v2); float vec4_dist_sqrd(const vec4 *v1, const vec4 *v2); float vec4_length(const vec4 *v); float vec4_dot(const vec4 *v1, const vec4 *v2); void vec4_sqrt(const vec4 *v, vec4 *out); void vec4_abs(const vec4 *v, vec4 *out); void vec4_pow(const vec4 *v, float exp, vec4 *out); void 
vec4_mul_vec4(const vec4 *v1, const vec4 *v2, vec4 *out); void vec4_mul(const vec4 *v, float fac, vec4 *out); void vec4_div(const vec4 *v, float fac, vec4 *out); void vec4_sub(const vec4 *v1, const vec4 *v2, vec4 *out); void vec4_add(const vec4 *v1, const vec4 *v2, vec4 *out); void vec4_set(const float *ary, vec4 *v); bool vec4_equals(const vec4 *v1, const vec4 *v2); void vec4_zero(vec4 *out); /** * To string a vec4 - you need to free * Will also work for quat, color, and vec3 */ char *vec4_tos(const vec4 *q); void vec3_zero(vec3 *out); bool vec3_equals(const vec3 *v1, const vec3 *v2); void vec3_set(float x, float y, float z, vec3 *v); void vec3_add(const vec3 *v1, const vec3 *v2, vec3 *out); void vec3_sub(const vec3 *v1, const vec3 *v2, vec3 *out); void vec3_div(const vec3 *v, float fac, vec3 *out); void vec3_div_vec3(const vec3 *v1, const vec3 *v2, vec3 *out); void vec3_mul(const vec3 *v, float fac, vec3 *out); void vec3_mul_vec3(const vec3 *v1, const vec3 *v2, vec3 *out); void vec3_pow(const vec3 *v, float exp, vec3 *out); void vec3_cross(const vec3 *v1, const vec3 *v2, vec3 *out); float vec3_dot(const vec3 *v1, const vec3 *v2); float vec3_length_sqrd(const vec3 *v); float vec3_length(const vec3 *v); float vec3_dist_sqrd(const vec3 *v1, const vec3 *v2); float vec3_dist(const vec3 *v1, const vec3 *v2); void vec3_normalize(const vec3 *v, vec3 *out); void vec2_zero(vec2 *out); bool vec2_equals(const vec2 *v1, const vec2 *v2); void vec2_set(float x, float y, vec2 *v); void vec2_add(const vec2 *v1, const vec2 *v2, vec2 *out); void vec2_sub(const vec2 *v1, const vec2 *v2, vec2 *out); void vec2_div(const vec2 *v, float fac, vec2 *out); void vec2_div_vec2(const vec2 *v1, const vec2 *v2, vec2 *out); void vec2_mul(const vec2 *v, float fac, vec2 *out); void vec2_mul_vec2(const vec2 *v1, const vec2 *v2, vec2 *out); void vec2_pow(const vec2 *v, float exp, vec2 *out); float vec2_dot(const vec2 *v1, const vec2 *v2); float vec2_length_sqrd(const vec2 *v); float 
vec2_length(const vec2 *v); float vec2_dist_sqrd(const vec2 *v1, const vec2 *v2); float vec2_dist(const vec2 *v1, const vec2 *v2); void vec2_normalize(const vec2 *v, vec2 *out); void vec2_to_array(const vec2 *v, float *out); #ifdef R2_MATHS_IMPLEMENTATION /////////////////////////////////////////////////////////////// // FNS bool r2_equals(float a, float b) { return fabs(a - b) < EPSILON; } static float __g_pi_deg = M_PI / 180.f; float deg_to_rad(float d) { return d * __g_pi_deg; } /////////////////////////////////////////////////////////////// // Vec2 void vec2_zero(vec2 *out) { out->x = 0.; out->y = 0.; } bool vec2_equals(const vec2 *v1, const vec2 *v2) { return r2_equals(v1->x, v2->x) && r2_equals(v1->y, v2->y); } void vec2_set(float x, float y, vec2 *v) { v->x = x; v->y = y; } void vec2_add(const vec2 *v1, const vec2 *v2, vec2 *out) { out->x = v1->x + v2->x; out->y = v1->y + v2->y; } void vec2_sub(const vec2 *v1, const vec2 *v2, vec2 *out) { out->x = v1->x - v2->x; out->y = v1->y - v2->y; } void vec2_div(const vec2 *v, float fac, vec2 *out) { float d = 1 / ((fac == 0) ? 1 : fac); out->x = v->x * d; out->y = v->y * d; } void vec2_div_vec2(const vec2 *v1, const vec2 *v2, vec2 *out) { out->x = v1->x / ((v2->x == 0) ? 1 : v2->x); out->y = v1->y / ((v2->y == 0) ? 
1 : v2->y); } void vec2_mul(const vec2 *v, float fac, vec2 *out) { out->x = v->x * fac; out->y = v->y * fac; } void vec2_mul_vec2(const vec2 *v1, const vec2 *v2, vec2 *out) { out->x = v1->x * v2->x; out->y = v1->y * v2->y; } void vec2_pow(const vec2 *v, float exp, vec2 *out) { out->x = pow(v->x, exp); out->y = pow(v->y, exp); } float vec2_dot(const vec2 *v1, const vec2 *v2) { return (v1->x * v2->x) + (v1->y * v2->y); } float vec2_length_sqrd(const vec2 *v) { float length = 0.0; length += v->x * v->x; length += v->y * v->y; return length; } float vec2_length(const vec2 *v) { return sqrt(vec2_length_sqrd(v)); } float vec2_dist_sqrd(const vec2 *v1, const vec2 *v2) { return (v1->x - v2->x) * (v1->x - v2->x) + (v1->y - v2->y) * (v1->y - v2->y); } float vec2_dist(const vec2 *v1, const vec2 *v2) { return sqrt(vec2_dist_sqrd(v1, v2)); } void vec2_normalize(const vec2 *v, vec2 *out) { float len = vec2_length(v); vec2_div(v, len, out); } void vec2_to_array(const vec2 *v, float *out) { out[0] = v->x; out[1] = v->y; } /////////////////////////////////////////////////////////////// // Vec3 void vec3_zero(vec3 *out) { out->x = 0.; out->y = 0.; out->z = 0.; } bool vec3_equals(const vec3 *v1, const vec3 *v2) { return r2_equals(v1->x, v2->x) && r2_equals(v1->y, v2->y) && r2_equals(v1->z, v2->z); } void vec3_set(float x, float y, float z, vec3 *v) { v->x = x; v->y = y; v->z = z; } void vec3_add(const vec3 *v1, const vec3 *v2, vec3 *out) { out->x = v1->x + v2->x; out->y = v1->y + v2->y; out->z = v1->z + v2->z; } void vec3_sub(const vec3 *v1, const vec3 *v2, vec3 *out) { out->x = v1->x - v2->x; out->y = v1->y - v2->y; out->z = v1->z - v2->z; } void vec3_div(const vec3 *v, float fac, vec3 *out) { float d = 1 / ((fac == 0) ? 1 : fac); out->x = v->x * d; out->y = v->y * d; out->z = v->z * d; } void vec3_div_vec3(const vec3 *v1, const vec3 *v2, vec3 *out) { out->x = v1->x / ((v2->x == 0) ? 1 : v2->x); out->y = v1->y / ((v2->y == 0) ? 1 : v2->y); out->z = v1->z / ((v2->z == 0) ? 
1 : v2->z); } void vec3_mul(const vec3 *v, float fac, vec3 *out) { out->x = v->x * fac; out->y = v->y * fac; out->z = v->z * fac; } void vec3_mul_vec3(const vec3 *v1, const vec3 *v2, vec3 *out) { out->x = v1->x * v2->x; out->y = v1->y * v2->y; out->z = v1->z * v2->z; } void vec3_pow(const vec3 *v, float exp, vec3 *out) { out->x = pow(v->x, exp); out->y = pow(v->y, exp); out->z = pow(v->z, exp); } float vec3_dot(const vec3 *v1, const vec3 *v2) { return (v1->x * v2->x) + (v1->y * v2->y) + (v1->z * v2->z); } void vec3_cross(const vec3 *v1, const vec3 *v2, vec3 *out) { out->x = (v1->y * v2->z) - (v1->z * v2->y); out->y = (v1->z * v2->x) - (v1->x * v2->z); out->z = (v1->x * v2->y) - (v1->y * v2->x); } float vec3_length_sqrd(const vec3 *v) { float length = v->x * v->x + v->y * v->y + v->z * v->z; return length; } float vec3_length(const vec3 *v) { return sqrt(vec3_length_sqrd(v)); } float vec3_dist_sqrd(const vec3 *v1, const vec3 *v2) { return (v1->x - v2->x) * (v1->x - v2->x) + (v1->y - v2->y) * (v1->y - v2->y) + (v1->z - v2->z) * (v1->z - v2->z); } float vec3_dist(const vec3 *v1, const vec3 *v2) { return sqrt(vec3_dist_sqrd(v1, v2)); } void vec3_normalize(const vec3 *v, vec3 *out) { float len = vec3_length(v); if (len == 0.0) return vec3_zero(out); else return vec3_div(v, len, out); } /////////////////////////////////////////////////////////////// // Vec4 void vec4_zero(vec4 *out) { out->x = 0.; out->y = 0.; out->z = 0.; out->w = 1.; } bool vec4_equals(const vec4 *v1, const vec4 *v2) { return r2_equals(v1->x, v2->x) && r2_equals(v1->y, v2->y) && r2_equals(v1->z, v2->z) && r2_equals(v1->w, v2->w); } void vec4_set(const float *ary, vec4 *v) { // what could possibly go wrong? 
v->x = ary[0]; v->y = ary[1]; v->z = ary[2]; v->w = ary[3]; } void vec4_add(const vec4 *v1, const vec4 *v2, vec4 *out) { out->x = v1->x + v2->x; out->y = v1->y + v2->y; out->z = v1->z + v2->z; out->w = v1->w + v2->w; } void vec4_sub(const vec4 *v1, const vec4 *v2, vec4 *out) { out->x = v1->x - v2->x; out->y = v1->y - v2->y; out->z = v1->z - v2->z; out->w = v1->w - v2->w; } void vec4_div(const vec4 *v, float fac, vec4 *out) { float d = 1 / ((fac == 0) ? 1 : fac); out->x = v->x * d; out->y = v->y * d; out->z = v->z * d; out->w = v->w * d; } void vec4_mul(const vec4 *v, float fac, vec4 *out) { out->x = v->x * fac; out->y = v->y * fac; out->z = v->z * fac; out->w = v->w * fac; } void vec4_mul_vec4(const vec4 *v1, const vec4 *v2, vec4 *out) { out->x = v1->x * v2->x; out->y = v1->y * v2->y; out->z = v1->z * v2->z; out->w = v1->w * v2->w; } void vec4_pow(const vec4 *v, float exp, vec4 *out) { out->x = pow(v->x, exp); out->y = pow(v->y, exp); out->z = pow(v->z, exp); out->w = pow(v->w, exp); } void vec4_abs(const vec4 *v, vec4 *out) { out->x = fabs(v->x); out->y = fabs(v->y); out->z = fabs(v->z); out->w = fabs(v->w); } void vec4_sqrt(const vec4 *v, vec4 *out) { out->x = sqrt(v->x); out->y = sqrt(v->y); out->z = sqrt(v->z); out->w = sqrt(v->w); } float vec4_dot(const vec4 *v1, const vec4 *v2) { return (v1->x * v2->x) + (v1->y * v2->y) + (v1->z * v2->z) + (v1->w * v2->w); } float vec4_length(const vec4 *v) { return sqrtf(v->x * v->x + v->y * v->y + v->z * v->z + v->w * v->w); } float vec4_dist_sqrd(const vec4 *v1, const vec4 *v2) { return (v1->x - v2->x) * (v1->x - v2->x) + (v1->y - v2->y) * (v1->y - v2->y) + (v1->y - v2->z) * (v1->y - v2->z) + (v1->y - v2->w) * (v1->y - v2->w); } float vec4_dist(const vec4 *v1, const vec4 *v2) { return sqrt(vec4_dist_sqrd(v1, v2)); } void vec4_normalize(const vec4 *v, vec4 *out) { float mag = vec4_length(v); if (mag < EPSILON) return vec4_zero(out); else { float d = 1 / mag; out->x = v->x * d; out->y = v->y * d; out->z = v->z * d; out->w = 
v->w * d; } } char *vec4_tos(const quat *q) { char *out = calloc(sizeof(char), 60); snprintf(out, 50, "(%f, %f, %f, %f)\n", q->x, q->y, q->z, q->w); return out; } /////////////////////////////////////////////////////////////// // Quat // http://www.tobynorris.com/work/prog/csharp/quatview/help/orientations_and_quaternions.htm void quat_zero(quat *q) { q->x = 0.; q->y = 0.; q->z = 0.; q->w = 0.; } void quat_identity(quat *q) { q->x = 0.; q->y = 0.; q->z = 0.; q->w = 0.; } void quat_add(const quat *q1, const quat *q2, quat *out) { vec4_add(q1, q2, out); } void quat_sub(const quat *q1, const quat *q2, quat *out) { vec4_sub(q1, q2, out); } float quat_dot(const quat *q1, const quat *q2) { return vec4_dot(q1, q2); } float quat_length(const quat *q) { return vec4_length(q); } float quat_magnitude(const quat *q) { return quat_length(q); } void quat_rot2q(const vec3 *axis, float theta, quat *out) { vec4_mul(axis, sinf(theta * .5), out); float d = cosf(theta * .5); out->w = d; } // Given a set of euler angles (in radians) create a quaterion void quat_from_euler(const vec3 *r, quat *q) { float roll = r->x; float pitch = r->y; float yaw = r->z; float cy = cosf(yaw * .5); float sy = sinf(yaw * .5); float cp = cosf(pitch * .5); float sp = sinf(pitch * .5); float cr = cosf(roll * .5); float sr = sinf(roll * .5); // Warning: in most maths apps and documentation about quats the w // part (real) is in the front. We have some names here that make it // a bit more difficult to see what is going on. 
q->w = cr * cp * cy + sr * sp * sy; // r q->x = sr * cp * cy - cr * sp * sy; // i q->y = cr * sp * cy + sr * cp * sy; // j q->z = cr * cp * sy - sr * sp * cy; // k } void quat_mul_quat(const quat *q1, const quat *q2, quat *out) { // i^2 = j^2 = k^2 = ijk = -1 // q1 = a+bi+cj+dk float a = q1->w; float b = q1->x; float c = q1->y; float d = q1->z; // q2 = e+fi+gj+hk float e = q2->w; float f = q2->x; float g = q2->y; float h = q2->z; // | i j k //-------------- // i | -1 k -j // j | -k -1 i // k | j -i -1 out->w = a * e - b * f - c * g - d * h; out->x = a * f + b * e + c * h - d * g; out->y = a * g - b * h + c * e + d * f; out->z = a * h + b * g - c * f + d * e; } void quat_conj(const quat *q, quat *out) { out->w = q->w; out->x = -q->x; out->y = -q->y; out->z = -q->z; } char *quat_tos(const quat *q) { char *out = calloc(sizeof(char), 100); snprintf(out, 100, "[%f + %fi + %fj + %fk]\n", q->w, q->x, q->y, q->z); return out; } void quat_normalize(const quat *q, quat *out) { return vec4_normalize(q, out); } void quat_mul_vec3(const quat *q, const vec3 *v, vec3 *out) { vec3 work = {.x = 0., .y = 0., .z = 0.}; quat inv = {.x = 0., .y = 0., .z = 0., .w = 0.}; quat_conj(q, &inv); quat_mul_quat(q, v, &work); quat_mul_quat(&work, &inv, out); } // void quat_mat4(const quat *q, mat4 *out) // { // float a = q->w; // float b = q->x; // float c = q->y; // float d = q->z; // // a -b -c -d // // b a -d c // // c d a -b // // d -c b a // out->m00 = a; // out->m01 = b; // out->m02 = c; // out->m03 = d; // out->m10 = -b; // out->m11 = a; // out->m12 = d; // out->m13 = -c; // out->m20 = -c; // out->m21 = -d; // out->m22 = a; // out->m23 = b; // out->m30 = -d; // out->m31 = c; // out->m32 = -b; // out->m33 = a; // } /////////////////////////////////////////////////////////////// // Mat4 void mat4_identity(mat4 *m) { // clang-format off m->m00 = 1.; m->m10 = 0.; m->m20 = 0.; m->m30 = 0.; m->m01 = 0.; m->m11 = 1.; m->m21 = 0.; m->m31 = 0.; m->m02 = 0.; m->m12 = 0.; m->m22 = 1.; m->m32 = 0.; 
m->m03 = 0.; m->m13 = 0.; m->m23 = 0.; m->m33 = 1.; // clang-format on } void mat4_set(const float *arry, mat4 *m) { m->m00 = arry[0]; m->m01 = arry[4]; m->m02 = arry[8]; m->m03 = arry[12]; m->m10 = arry[1]; m->m11 = arry[5]; m->m12 = arry[9]; m->m13 = arry[13]; m->m20 = arry[2]; m->m21 = arry[6]; m->m22 = arry[10]; m->m23 = arry[14]; m->m30 = arry[3]; m->m31 = arry[7]; m->m32 = arry[11]; m->m33 = arry[15]; } void mat4_transform(const vec4 *p, const mat4 *mat, vec4 *out) { out->x = (mat->m00 * p->x) + (mat->m01 * p->y) + (mat->m02 * p->z) + (mat->m03 * p->w); out->y = (mat->m10 * p->x) + (mat->m11 * p->y) + (mat->m12 * p->z) + (mat->m13 * p->w); out->z = (mat->m20 * p->x) + (mat->m21 * p->y) + (mat->m22 * p->z) + (mat->m23 * p->w); out->w = (mat->m30 * p->x) + (mat->m31 * p->y) + (mat->m32 * p->z) + (mat->m33 * p->w); } void mat4_mul(const mat4 *m1, const mat4 *m2, mat4 *out) { #if !R2_MAT_MUL_LUDICROUS_SPEED mat_mul(m1->a_mat4, m2->a_mat4, 4, 4, 4, 4, out->a_mat4); #else // unrolling the loops makes this function faster // so if you're keen you can use the -funroll-loops gcc flag. 
// I am too lazy to unroll this by hand at the moment; PRs welcome // // 10 runs of 10000 multiplies (average time in seconds): // -funroll-all-loops -funroll-loops looping // 0.0018666 0.0018094 0.0019127 unsigned char i, j; float row[4]; float col[4]; // #pragma omp parallel for simd collapse(2) #pragma omp simd collapse(2) for (i = 0; i < 16; i += 4) { for (j = 0; j < 4; j++) { // Row row[0] = m1->a_mat4[i + 0]; row[1] = m1->a_mat4[i + 1]; row[2] = m1->a_mat4[i + 2]; row[3] = m1->a_mat4[i + 3]; // Column col[0] = m2->a_mat4[j + 0]; col[1] = m2->a_mat4[j + 4]; col[2] = m2->a_mat4[j + 8]; col[3] = m2->a_mat4[j + 12]; // clang-format off out->a_mat4[i + j] = row[0] * col[0] + row[1] * col[1] + row[2] * col[2] + row[3] * col[3]; // clang-format on } } #endif } void mat4_perspective(float fov, float aspect, float near, float far, mat4 *out) { /*** * n/r 0 0 0 * 0 n/t 0 0 * 0 0 -(f+n)/f-n -2fn/f-n * 0 0 -1 0 */ float range = tan(fov / 2) * near; float Sx = (2 * near) / (range * aspect + range * aspect); float Sy = near / range; float Sz = -(far + near) / (far - near); float Pz = -(2 * far * near) / (far - near); // clang-format off out->m00 = Sx; out->m10 = 0; out->m20 = 0; out->m30 = 0; out->m01 = 0; out->m11 = Sy; out->m21 = 0; out->m31 = 0; out->m02 = 0; out->m12 = 0; out->m22 = Sz; out->m32 = Pz; out->m03 = 0; out->m13 = 0; out->m23 = -1; out->m33 = 0; // clang-format on } void mat4_lookat(const vec4 *pos, const vec4 *target, const vec4 *up, mat4 *out) { /** * [fur] * [pos] * rx ux fx 0 1 0 0 -px * ry uy fy 0 * 0 1 0 -py * rz uz fz 0 0 0 1 -pz * 0 0 0 1 0 0 0 1 */ vec4 right = {.x = 0, .y = 0, .z = 0, .w = 0}; vec3_cross(target, up, &right); mat4_identity(out); const vec4 *u = up; const vec4 *f = target; const vec4 *r = &right; // clang-format off out->m00 = r->x; out->m10 = r->y; out->m20 = r->z; out->m30 = -pos->x; out->m01 = u->x; out->m11 = u->y; out->m21 = u->z; out->m31 = -pos->y; out->m02 = -f->x; out->m12 = -f->y; out->m22 = f->z; out->m32 = -pos->z; 
out->m03 = 0; out->m13 = 0; out->m23 = 0; out->m33 = 1; // clang-format on } char *mat4_tos(const mat4 *m) { char *out = calloc(sizeof(char), 300); // clang-format off snprintf(out, 300, "[\n %f, %f, %f, %f \n %f, %f, %f, %f \n %f, %f, %f, %f \n %f, %f, %f, %f \n]\n", m->m00, m->m10, m->m20, m->m30, m->m01, m->m11, m->m21, m->m31, m->m02, m->m12, m->m22, m->m32, m->m03, m->m13, m->m23, m->m33 ); // clang-format on return out; } void mat4_transpose(const mat4 *m1, mat4 *m2) { const float a = m1->m00; const float b = m1->m10; const float c = m1->m20; const float d = m1->m30; const float e = m1->m01; const float f = m1->m11; const float g = m1->m21; const float h = m1->m31; const float i = m1->m02; const float j = m1->m12; const float k = m1->m22; const float l = m1->m32; const float m = m1->m03; const float n = m1->m13; const float o = m1->m23; const float p = m1->m33; // clang-format off m2->m00 = a; m2->m10 = e; m2->m20 = i; m2->m30 = m; m2->m01 = b; m2->m11 = f; m2->m21 = j; m2->m31 = n; m2->m02 = c; m2->m12 = g; m2->m22 = k; m2->m32 = o; m2->m03 = d; m2->m13 = h; m2->m23 = l; m2->m33 = p; // clang-format on } /////////////////////////////////////////////////////////////// // Mat3 void mat3_identity(mat3 *m) { // clang-format off m->m00 = 1.; m->m10 = 0.; m->m20 = 0.; m->m01 = 0.; m->m11 = 1.; m->m21 = 0.; m->m02 = 0.; m->m12 = 0.; m->m22 = 1.; // clang-format on } // Multiply two 3x3 matrix output to out void mat3_mul(const mat3 *m1, const mat3 *m2, mat3 *out) { #if !R2_MAT_MUL_LUDICROUS_SPEED mat_mul(m1->a_mat3, m2->a_mat3, 3, 3, 3, 3, out->a_mat3); #else // unrolling the loops makes this function faster // so if you're keen you can use the -funroll-loops gcc flag. 
// I am too lazy to unroll this by hand at the moment; PRs welcome // // 10 runs of 10000 multiplies (average time in seconds): // -funroll-all-loops -funroll-loops looping // 0.0018666 0.0018094 0.0019127 unsigned char i, j; float row[4]; float col[4]; // #pragma omp parallel for simd collapse(2) #pragma omp simd collapse(2) for (i = 0; i < 9; i += 3) { for (j = 0; j < 3; j++) { // Row row[0] = m1->a_mat3[i + 0]; row[1] = m1->a_mat3[i + 1]; row[2] = m1->a_mat3[i + 2]; // Column col[0] = m2->a_mat3[j + 0]; col[1] = m2->a_mat3[j + 3]; col[2] = m2->a_mat3[j + 6]; // clang-format off out->a_mat3[i + j] = row[0] * col[0] + row[1] * col[1] + row[2] * col[2]; // clang-format on } } #endif } char *mat3_tos(const mat3 *m) { char *out = calloc(sizeof(char), 300); // clang-format off snprintf(out, 300, "[\n %f, %f, %f \n %f, %f, %f \n %f, %f, %f \n]\n", m->m00, m->m10, m->m20, m->m01, m->m11, m->m21, m->m02, m->m12, m->m22 ); // clang-format on return out; } /////////////////////////////////////////////////////////////// // Generic Matrix Multiply void mat_mul(const float *m1, const float *m2, unsigned char r1, unsigned char c1, unsigned char r2, unsigned char c2, float *out) { if (c1 != r2) { // Error ("column size of m1 must match row size of m2"); return; } float *row = (float *)calloc(sizeof(float), r1); float *col = (float *)calloc(sizeof(float), c2); unsigned char i, r, j, c; // Loop over each row of the first matrix for (i = 0; i < r1; i++) { // Load a single Row for (r = 0; r < c1; r++) { row[r] = m1[r + i * c1]; } // Loop over the columns to use when multiplying // against the row loaded above #pragma omp simd collapse(2) for (j = 0; j < c2; j++) { for (c = 0; c < r2; c++) { // Load a single column col[c] = m2[j + c * c2]; // v = E row * col out[j + i * r1] += row[c] * col[c]; } } } free(row); free(col); } #endif /* implementation */ #ifdef __cplusplus } #endif #endif /* R2_MATHS */ /* revision history: 0.0 (2020-09-09) Initial bits */ /* 
------------------------------------------------------------------------------ This software is available under 2 licenses -- choose whichever you prefer. ------------------------------------------------------------------------------ ALTERNATIVE A - MIT License Copyright (c) 2020 Rob Rohan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ ALTERNATIVE B - Public Domain (www.unlicense.org) This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. 
We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------------------------------------------------------------------ */
phonon.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stddef.h> #include "dynmat.h" #include "phonon.h" #include "lapack_wrapper.h" static long collect_undone_grid_points(long *undone, char *phonon_done, const long num_grid_points, const long *grid_points); static void get_undone_phonons(double *frequencies, lapack_complex_double *eigenvectors, const long *undone_grid_points, const long num_undone_grid_points, const long (*grid_address)[3], const double QDinv[3][3], const double *fc2, const double (*svecs_fc2)[3], const long (*multi_fc2)[2], const long num_patom, const long num_satom, const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2, const double unit_conversion_factor, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const char uplo); static void get_gonze_undone_phonons(double *frequencies, lapack_complex_double *eigenvectors, const long *undone_grid_points, const long num_undone_grid_points, const long (*grid_address)[3], const double QDinv[3][3], const double *fc2, const double (*svecs_fc2)[3], const long (*multi_fc2)[2], const double (*positions)[3], const long num_patom, const long num_satom, const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2, const double unit_conversion_factor, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double *dd_q0, const double (*G_list)[3], const long num_G_points, const double lambda, const char uplo); static void get_phonons(lapack_complex_double *eigvecs, const double q[3], const double *fc2, const double *masses, const long *p2s, const long *s2p, const long (*multi)[2], const long num_patom, const long num_satom, const double (*svecs)[3], const long is_nac, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double 
nac_factor, const double unit_conversion_factor); static void get_gonze_phonons(lapack_complex_double *eigvecs, const double q[3], const double *fc2, const double *masses, const long *p2s, const long *s2p, const long (*multi)[2], const double (*positions)[3], const long num_patom, const long num_satom, const double (*svecs)[3], const long is_nac, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double *dd_q0, const double (*G_list)[3], const long num_G_points, const double lambda); static void get_dynamical_matrix(lapack_complex_double *dynmat, const double q[3], const double *fc2, const double *masses, const long *p2s, const long *s2p, const long (*multi)[2], const long num_patom, const long num_satom, const double (*svecs)[3], const long is_nac, const double (*born)[3][3], /* Wang NAC unless NULL */ const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor); static void get_charge_sum(double (*charge_sum)[3][3], const long num_patom, const long num_satom, const double q[3], const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor); static long needs_nac(const double (*born)[3][3], const long (*grid_address)[3], const long gp, const double *q_direction); void phn_get_phonons_at_gridpoints(double *frequencies, lapack_complex_double *eigenvectors, char *phonon_done, const long num_phonons, const long *grid_points, const long num_grid_points, const long (*grid_address)[3], const double QDinv[3][3], const double *fc2, const double (*svecs_fc2)[3], const long (*multi_fc2)[2], const long num_patom, const long num_satom, const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2, const double unit_conversion_factor, const double (*born)[3][3], const double dielectric[3][3], const double 
reciprocal_lattice[3][3], const double *q_direction, /* must be pointer */ const double nac_factor, const char uplo) { long num_undone; long *undone; undone = (long *)malloc(sizeof(long) * num_phonons); num_undone = collect_undone_grid_points(undone, phonon_done, num_grid_points, grid_points); get_undone_phonons(frequencies, eigenvectors, undone, num_undone, grid_address, QDinv, fc2, svecs_fc2, multi_fc2, num_patom, num_satom, masses_fc2, p2s_fc2, s2p_fc2, unit_conversion_factor, born, dielectric, reciprocal_lattice, q_direction, nac_factor, uplo); free(undone); undone = NULL; } void phn_get_gonze_phonons_at_gridpoints(double *frequencies, lapack_complex_double *eigenvectors, char *phonon_done, const long num_phonons, const long *grid_points, const long num_grid_points, const long (*grid_address)[3], const double QDinv[3][3], const double *fc2, const double (*svecs_fc2)[3], const long (*multi_fc2)[2], const double (*positions)[3], const long num_patom, const long num_satom, const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2, const double unit_conversion_factor, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, /* pointer */ const double nac_factor, const double *dd_q0, const double (*G_list)[3], const long num_G_points, const double lambda, const char uplo) { long num_undone; long *undone; undone = (long *)malloc(sizeof(long) * num_phonons); num_undone = collect_undone_grid_points(undone, phonon_done, num_grid_points, grid_points); get_gonze_undone_phonons(frequencies, eigenvectors, undone, num_undone, grid_address, QDinv, fc2, svecs_fc2, multi_fc2, positions, num_patom, num_satom, masses_fc2, p2s_fc2, s2p_fc2, unit_conversion_factor, born, dielectric, reciprocal_lattice, q_direction, nac_factor, dd_q0, G_list, num_G_points, lambda, uplo); free(undone); undone = NULL; } static long collect_undone_grid_points(long *undone, char *phonon_done, const long num_grid_points, const 
long *grid_points) { long i, gp, num_undone; num_undone = 0; for (i = 0; i < num_grid_points; i++) { gp = grid_points[i]; if (phonon_done[gp] == 0) { undone[num_undone] = gp; num_undone++; phonon_done[gp] = 1; } } return num_undone; } static void get_undone_phonons(double *frequencies, lapack_complex_double *eigenvectors, const long *undone_grid_points, const long num_undone_grid_points, const long (*grid_address)[3], const double QDinv[3][3], const double *fc2, const double (*svecs_fc2)[3], const long (*multi_fc2)[2], const long num_patom, const long num_satom, const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2, const double unit_conversion_factor, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const char uplo) { long i, j, gp, num_band; long is_nac, info; double q[3]; double *freqs_tmp; num_band = num_patom * 3; #ifdef PHPYOPENMP #pragma omp parallel for private(j, q, gp, is_nac) #endif for (i = 0; i < num_undone_grid_points; i++) { gp = undone_grid_points[i]; for (j = 0; j < 3; j++) { q[j] = QDinv[j][0] * grid_address[gp][0] + QDinv[j][1] * grid_address[gp][1] + QDinv[j][2] * grid_address[gp][2]; } is_nac = needs_nac(born, grid_address, gp, q_direction); get_phonons(eigenvectors + num_band * num_band * gp, q, fc2, masses_fc2, p2s_fc2, s2p_fc2, multi_fc2, num_patom, num_satom, svecs_fc2, is_nac, born, dielectric, reciprocal_lattice, q_direction, nac_factor, unit_conversion_factor); } /* To avoid multithreaded BLAS in OpenMP loop */ #ifdef PHPYOPENMP #ifndef MULTITHREADED_BLAS #pragma omp parallel for private(j, gp, freqs_tmp, info) #endif #endif for (i = 0; i < num_undone_grid_points; i++) { gp = undone_grid_points[i]; freqs_tmp = frequencies + num_band * gp; /* Store eigenvalues in freqs array. */ /* Eigenvectors are overwritten on eigvecs array. 
*/ info = phonopy_zheev(freqs_tmp, eigenvectors + num_band * num_band * gp, num_band, uplo); /* Sqrt of eigenvalues are re-stored in freqs array.*/ for (j = 0; j < num_band; j++) { freqs_tmp[j] = sqrt(fabs(freqs_tmp[j])) * ((freqs_tmp[j] > 0) - (freqs_tmp[j] < 0)) * unit_conversion_factor; } } } static void get_gonze_undone_phonons(double *frequencies, lapack_complex_double *eigenvectors, const long *undone_grid_points, const long num_undone_grid_points, const long (*grid_address)[3], const double QDinv[3][3], const double *fc2, const double (*svecs_fc2)[3], const long (*multi_fc2)[2], const double (*positions)[3], const long num_patom, const long num_satom, const double *masses_fc2, const long *p2s_fc2, const long *s2p_fc2, const double unit_conversion_factor, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double *dd_q0, const double (*G_list)[3], const long num_G_points, const double lambda, const char uplo) { long i, j, gp, num_band; long is_nac, info; double q[3]; double *freqs_tmp; num_band = num_patom * 3; #ifdef PHPYOPENMP #pragma omp parallel for private(j, q, gp, is_nac) #endif for (i = 0; i < num_undone_grid_points; i++) { gp = undone_grid_points[i]; for (j = 0; j < 3; j++) { q[j] = QDinv[j][0] * grid_address[gp][0] + QDinv[j][1] * grid_address[gp][1] + QDinv[j][2] * grid_address[gp][2]; } is_nac = needs_nac(born, grid_address, gp, q_direction); get_gonze_phonons(eigenvectors + num_band * num_band * gp, q, fc2, masses_fc2, p2s_fc2, s2p_fc2, multi_fc2, positions, num_patom, num_satom, svecs_fc2, is_nac, born, dielectric, reciprocal_lattice, q_direction, nac_factor, dd_q0, G_list, num_G_points, lambda); } /* To avoid multithreaded BLAS in OpenMP loop */ #ifdef PHPYOPENMP #ifndef MULTITHREADED_BLAS #pragma omp parallel for private(j, gp, freqs_tmp, info) #endif #endif for (i = 0; i < num_undone_grid_points; i++) { gp = undone_grid_points[i]; /* Store 
eigenvalues in freqs array. */ /* Eigenvectors are overwritten on eigvecs array. */ freqs_tmp = frequencies + num_band * gp; info = phonopy_zheev(freqs_tmp, eigenvectors + num_band * num_band * gp, num_band, uplo); /* Sqrt of eigenvalues are re-stored in freqs array.*/ for (j = 0; j < num_band; j++) { freqs_tmp[j] = sqrt(fabs(freqs_tmp[j])) * ((freqs_tmp[j] > 0) - (freqs_tmp[j] < 0)) * unit_conversion_factor; } } } static void get_phonons(lapack_complex_double *eigvecs, const double q[3], const double *fc2, const double *masses, const long *p2s, const long *s2p, const long (*multi)[2], const long num_patom, const long num_satom, const double (*svecs)[3], const long is_nac, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double unit_conversion_factor) { /* Store dynamical matrix in eigvecs array. */ get_dynamical_matrix(eigvecs, q, fc2, masses, p2s, s2p, multi, num_patom, num_satom, svecs, is_nac, born, dielectric, reciprocal_lattice, q_direction, nac_factor); } static void get_gonze_phonons(lapack_complex_double *eigvecs, const double q[3], const double *fc2, const double *masses, const long *p2s, const long *s2p, const long (*multi)[2], const double (*positions)[3], const long num_patom, const long num_satom, const double (*svecs)[3], const long is_nac, const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor, const double *dd_q0, const double (*G_list)[3], const long num_G_points, const double lambda) { long i, j, k, l, adrs, num_band; double mm; double q_cart[3]; double *q_dir_cart; lapack_complex_double *dd; dd = NULL; q_dir_cart = NULL; num_band = num_patom * 3; dym_get_dynamical_matrix_at_q((double *)eigvecs, num_patom, num_satom, fc2, q, svecs, multi, masses, s2p, p2s, NULL, 0); dd = (lapack_complex_double *) malloc(sizeof(lapack_complex_double) * num_band * 
num_band); for (i = 0; i < 3; i++) { q_cart[i] = 0; for (j = 0; j < 3; j++) { q_cart[i] += reciprocal_lattice[i][j] * q[j]; } } if (q_direction) { q_dir_cart = (double *)malloc(sizeof(double) * 3); for (i = 0; i < 3; i++) { q_dir_cart[i] = 0; for (j = 0; j < 3; j++) { q_dir_cart[i] += reciprocal_lattice[i][j] * q_direction[j]; } } } dym_get_recip_dipole_dipole((double *)dd, dd_q0, G_list, num_G_points, num_patom, q_cart, q_dir_cart, born, dielectric, positions, nac_factor, lambda, 1e-5); if (q_direction) { free(q_dir_cart); q_dir_cart = NULL; } for (i = 0; i < num_patom; i++) { for (j = 0; j < num_patom; j++) { mm = sqrt(masses[i] * masses[j]); for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l; eigvecs[adrs] = lapack_make_complex_double( lapack_complex_double_real(eigvecs[adrs]) + lapack_complex_double_real(dd[adrs]) / mm, lapack_complex_double_imag(eigvecs[adrs]) + lapack_complex_double_imag(dd[adrs]) / mm); } } } } free(dd); dd = NULL; } static void get_dynamical_matrix(lapack_complex_double *dynmat, const double q[3], const double *fc2, const double *masses, const long *p2s, const long *s2p, const long (*multi)[2], const long num_patom, const long num_satom, const double (*svecs)[3], const long is_nac, const double (*born)[3][3], /* Wang NAC unless NULL */ const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor) { double(*charge_sum)[3][3]; charge_sum = NULL; if (is_nac) { charge_sum = (double(*)[3][3]) malloc(sizeof(double[3][3]) * num_patom * num_patom * 9); get_charge_sum(charge_sum, num_patom, num_satom, q, born, dielectric, reciprocal_lattice, q_direction, nac_factor); } dym_get_dynamical_matrix_at_q((double *)dynmat, num_patom, num_satom, fc2, q, svecs, multi, masses, s2p, p2s, charge_sum, 0); if (is_nac) { free(charge_sum); charge_sum = NULL; } } static void get_charge_sum(double (*charge_sum)[3][3], const long num_patom, const long 
num_satom, const double q[3], const double (*born)[3][3], const double dielectric[3][3], const double reciprocal_lattice[3][3], const double *q_direction, const double nac_factor) { long i, j; double inv_dielectric_factor, dielectric_factor, tmp_val; double q_cart[3]; if (q_direction) { for (i = 0; i < 3; i++) { q_cart[i] = 0.0; for (j = 0; j < 3; j++) { q_cart[i] += reciprocal_lattice[i][j] * q_direction[j]; } } } else { for (i = 0; i < 3; i++) { q_cart[i] = 0.0; for (j = 0; j < 3; j++) { q_cart[i] += reciprocal_lattice[i][j] * q[j]; } } } inv_dielectric_factor = 0.0; for (i = 0; i < 3; i++) { tmp_val = 0.0; for (j = 0; j < 3; j++) { tmp_val += dielectric[i][j] * q_cart[j]; } inv_dielectric_factor += tmp_val * q_cart[i]; } /* N = num_satom / num_patom = number of prim-cell in supercell */ /* N is used for Wang's method. */ dielectric_factor = nac_factor / inv_dielectric_factor / num_satom * num_patom; dym_get_charge_sum(charge_sum, num_patom, dielectric_factor, q_cart, born); } static long needs_nac(const double (*born)[3][3], const long (*grid_address)[3], const long gp, const double *q_direction) { long is_nac; if (born) { if (grid_address[gp][0] == 0 && grid_address[gp][1] == 0 && grid_address[gp][2] == 0 && q_direction == NULL) { is_nac = 0; } else { is_nac = 1; } } else { is_nac = 0; } return is_nac; }
copy.c
#include "copy.h"

/* Memory-copy microbenchmark kernels: the same double-array copy written
 * with a reference loop, inline asm, rep-movsq, and SSE/AVX/AVX-512
 * intrinsics (including non-temporal and gather/scatter variants).
 * All kernels copy n doubles from a to b; vector kernels assume n is a
 * multiple of their vector width and, for aligned loads/stores, that a and
 * b are suitably aligned — TODO confirm against the benchmark driver. */

/* Plain scalar reference copy. */
void copy_ref(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        b[i] = a[i];
    }
}

/* Scalar copy forced through explicit mov instructions. */
void copy_mov(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        double t;
        asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));     /* t = a[i]; */
        asm ("mov %1, %0" : "=m" (b[i]) : "r" (t));     /* b[i] = t; */
    }
}

/* rep-movsq copy; with OpenMP each thread copies its own contiguous chunk.
 * It might make more sense to do rep-movsq a page at a time and make the
 * alignment nicer... */
void copy_rep_movsq(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
#ifdef _OPENMP
    #pragma omp parallel
    {
        int me = omp_get_thread_num();
        int nt = omp_get_num_threads();
        size_t chunk = 1+(n-1)/nt;
        size_t start = me*chunk;
        size_t end   = (me+1)*chunk;
        if (end>n) end = n;
        size_t tn = (end>start) ? end-start : 0;
        const double * RESTRICT ta = a+start;
        double * RESTRICT tb = b+start;
#ifdef __INTEL_COMPILER
        asm("rep movsq" : "=D" (tb), "=S" (ta), "=c" (tn)
                        : "0" (tb), "1" (ta), "2" (tn)
                        : "memory");
#else
        tn *= sizeof(double);
        memcpy(tb,ta,tn);
#endif
    }
#else
    {
#if HAS_GNU_EXTENDED_ASM
        asm("rep movsq" : "=D" (b), "=S" (a), "=c" (n)
                        : "0" (b), "1" (a), "2" (n)
                        : "memory");
#else
        /* Fix: the previous code did `tn *= sizeof(double);` here, but tn
         * is only declared in the OpenMP branch — compile error. The byte
         * count is computed inline instead. */
        memcpy(b,a,n*sizeof(double));
#endif
    }
#endif
}

#ifdef __SSE__

#if 0
/* BROKEN: movntq takes an MMX register operand; kept for reference. */
void copy_movntq(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        double t;
        asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));
        /* movntq does not work here... */
        asm ("movntq %1, %0" : "=m" (b[i]) : "r" (t));
    }
    asm ("sfence" ::: "memory");
}
#endif

#ifdef __INTEL_COMPILER
/* Non-temporal store via MMX _mm_stream_pi (ICC only). */
void copy_movntq64(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    //_mm_empty();
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        __m64 t = _m_from_int64( *(__int64*)&(a[i]) );
        _mm_stream_pi( (__m64*)&(b[i]), (__m64)t);
    }
    _mm_sfence();
}
#endif /* ICC */

#endif /* SSE */

#ifdef __SSE2__

/* Non-temporal scalar store via movnti. */
void copy_movnti(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        double t;
        asm ("mov %1, %0" : "=r" (t) : "m" (a[i]));     /* t = a[i]; */
        asm ("movnti %1, %0" : "=m" (b[i]) : "r" (t));  /* b[i] = t;  */
    }
    asm ("sfence" ::: "memory");
}

#ifdef __INTEL_COMPILER
/* Non-temporal 64-bit store via _mm_stream_si64 (ICC only). */
void copy_movnti64(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    //_mm_empty();
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i++) {
        __m64 t = _m_from_int64( *(__int64*)&(a[i]) );
        _mm_stream_si64( (__int64*)&(b[i]), *(__int64*)&t);
    }
    _mm_sfence();
}
#endif /* ICC */

/* 128-bit aligned load/store. */
void copy_movapd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_load_pd( &(a[i]) );
        _mm_store_pd( &(b[i]), t);
    }
}

/* 128-bit aligned load + non-temporal store. */
void copy_movntpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_load_pd( &(a[i]) );
        _mm_stream_pd( &(b[i]), t);
    }
    _mm_sfence();
}

#endif /* SSE2 */

#ifdef __SSE4_1__

/* 128-bit non-temporal load and store. */
void copy_movntdqa128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128i t = _mm_stream_load_si128( (__m128i*)&(a[i]) );
        _mm_stream_si128 ( (__m128i*)&(b[i]), t);
    }
    _mm_sfence();
}

#endif /* SSE4.1 */

#ifdef __AVX__

/* 256-bit aligned load/store. */
void copy_vmovapd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_load_pd( &(a[i]) );
        _mm256_store_pd( &(b[i]), t);
    }
}

/* 256-bit aligned load + non-temporal store. */
void copy_vmovntpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_load_pd( &(a[i]) );
        _mm256_stream_pd( &(b[i]), t);
    }
    _mm_sfence();
}

#endif /* AVX */

#ifdef __AVX2__

/* 256-bit non-temporal load and store. */
void copy_vmovntdqa256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256i t = _mm256_stream_load_si256( (__m256i*)&(a[i]) );
        _mm256_stream_si256 ( (__m256i*)&(b[i]), t);
    }
    _mm_sfence();
}

/* 128-bit gather with 32-bit indices; scalar stores. */
void copy_vgatherdpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m128i vindex = _mm_set_epi32(-1,-1,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_i32gather_pd( &(a[i]), vindex, 8 /* scale */ );
        _mm_storel_pd( &(b[i  ]), t);
        _mm_storeh_pd( &(b[i+1]), t);
    }
}

/* 128-bit gather with 64-bit indices; scalar stores. */
void copy_vgatherqpd128(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m128i vindex = _mm_set_epi64x(1,0); // works
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=2) {
        __m128d t = _mm_i64gather_pd( &(a[i]), vindex, 8 /* scale */ );
        _mm_storel_pd( &(b[i  ]), t);
        _mm_storeh_pd( &(b[i+1]), t);
    }
}

/* 256-bit gather with 32-bit indices; scalar stores. */
void copy_vgatherdpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m128i vindex = _mm_set_epi32(3,2,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_i32gather_pd( &(a[i]), vindex, 8 /* scale */ );
        __m128d l = _mm256_extractf128_pd(t,0);
        __m128d u = _mm256_extractf128_pd(t,1);
        _mm_storel_pd( &(b[i  ]), l);
        _mm_storeh_pd( &(b[i+1]), l);
        _mm_storel_pd( &(b[i+2]), u);
        _mm_storeh_pd( &(b[i+3]), u);
    }
}

/* 256-bit gather with 64-bit indices; scalar stores. */
void copy_vgatherqpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m256i vindex = _mm256_set_epi64x(3,2,1,0); // works
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_i64gather_pd( &(a[i]), vindex, 8 /* scale */ );
        __m128d l = _mm256_extractf128_pd(t,0);
        __m128d u = _mm256_extractf128_pd(t,1);
        _mm_storel_pd( &(b[i  ]), l);
        _mm_storeh_pd( &(b[i+1]), l);
        _mm_storel_pd( &(b[i+2]), u);
        _mm_storeh_pd( &(b[i+3]), u);
    }
}

/* 256-bit masked gather (all lanes enabled); scalar stores. */
void copy_mvgatherqpd256(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m256i vindex = _mm256_set_epi64x(3,2,1,0); // works
    // O in OQ means ordered, i.e. AND. unordered is OR. Q means quiet
    // i.e. non-signaling.
    __m256d src = _mm256_cmp_pd(_mm256_setzero_pd(),_mm256_setzero_pd(),_CMP_EQ_OQ); // sets all bits to 1
    __m256d mask = src;
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=4) {
        __m256d t = _mm256_mask_i64gather_pd( src, &(a[i]), vindex, mask, 8 /* scale */ );
        __m128d l = _mm256_extractf128_pd(t,0);
        __m128d u = _mm256_extractf128_pd(t,1);
        _mm_storel_pd( &(b[i  ]), l);
        _mm_storeh_pd( &(b[i+1]), l);
        _mm_storel_pd( &(b[i+2]), u);
        _mm_storeh_pd( &(b[i+3]), u);
    }
}

#endif /* AVX2 */

#ifdef __AVX512F__

/* 512-bit aligned load/store. */
void copy_vmovapd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_load_pd( &(a[i]) );
        _mm512_store_pd( &(b[i]), t);
    }
}

/* 512-bit unaligned load/store. */
void copy_vmovupd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_loadu_pd( &(a[i]) );
        _mm512_storeu_pd( &(b[i]), t);
    }
}

/* 512-bit masked aligned load/store (full mask). */
void copy_mvmovapd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_load_pd( src, k, &(a[i]) );
        _mm512_mask_store_pd( &(b[i]), k, t);
    }
}

/* 512-bit masked unaligned load/store (full mask). */
void copy_mvmovupd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_loadu_pd( src, k, &(a[i]) );
        _mm512_mask_storeu_pd( &(b[i]), k, t);
    }
}

/* 512-bit aligned load + non-temporal store. */
void copy_vmovntpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_load_pd( &(a[i]) );
        _mm512_stream_pd( &(b[i]), t);
    }
    _mm_sfence();
}

/* 512-bit non-temporal load and store. */
void copy_vmovntdqa512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512i t = _mm512_stream_load_si512( (__m512i*)&(a[i]) );
        _mm512_stream_si512 ( (__m512i*)&(b[i]), t);
    }
    _mm_sfence();
}

/* 512-bit gather + scatter with 32-bit indices. */
void copy_vGSdpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m256i vindex = _mm256_set_epi32(7,6,5,4,3,2,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_i32gather_pd(vindex, &(a[i]), 8 /* scale */ );
        _mm512_i32scatter_pd( &(b[i]), vindex, t, 8 /* scale */ );
    }
}

/* 512-bit masked gather + scatter with 32-bit indices (full mask). */
void copy_mvGSdpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    const __m256i vindex = _mm256_set_epi32(7,6,5,4,3,2,1,0); // start from the right...
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_i32gather_pd(src, k, vindex, &(a[i]), 8 /* scale */ );
        _mm512_mask_i32scatter_pd( &(b[i]), k, vindex, t, 8 /* scale */ );
    }
}

/* 512-bit gather + scatter with 64-bit indices. */
void copy_vGSqpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    const __m512i vindex = _mm512_set_epi64(7,6,5,4,3,2,1,0);
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_i64gather_pd(vindex, &(a[i]), 8 /* scale */ );
        _mm512_i64scatter_pd( &(b[i]), vindex, t, 8 /* scale */ );
    }
}

/* 512-bit masked gather + scatter with 64-bit indices (full mask). */
void copy_mvGSqpd512(size_t n, const double * RESTRICT a, double * RESTRICT b)
{
    __m512d src = {0};
    __mmask8 k = 255;
    const __m512i vindex = _mm512_set_epi64(7,6,5,4,3,2,1,0);
    OMP_PARALLEL_FOR
    for (size_t i=0; i<n; i+=8) {
        __m512d t = _mm512_mask_i64gather_pd(src, k, vindex, &(a[i]), 8 /* scale */ );
        _mm512_mask_i64scatter_pd( &(b[i]), k, vindex, t, 8 /* scale */ );
    }
}

#endif /* AVX-512F */
omp_reduction.c
/******************************************************************************
 * FILE: omp_reduction.c
 * DESCRIPTION:
 *   OpenMP Example - Combined Parallel Loop Reduction - C/C++ Version
 *   Demonstrates a sum reduction inside a combined parallel-for construct.
 *   No explicit shared/private clauses are needed: OpenMP's default scoping
 *   makes the loop index private to each thread and file/block-scope data
 *   shared, while the reduction clause gives every thread its own partial
 *   sum that is combined at the end of the loop.
 * AUTHOR: Blaise Barney  5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
  float x[100], y[100];
  const int count = 100;

  /* Fill both vectors with 0, 1, 2, ... so the dot product is sum of k*k. */
  for (int k = 0; k < count; k++) {
    x[k] = (float)k;
    y[k] = x[k];
  }

  /* Dot product: each thread accumulates a private partial sum, and the
     reduction(+:sum) clause combines them after the loop. */
  float sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
  for (int k = 0; k < count; k++)
    sum = sum + (x[k] * y[k]);

  printf(" Sum = %f\n",sum);
  return 0;
}
spawn_parallel_omp.c
/* Microbenchmark: time how long it takes an OpenMP team to spawn and
 * complete `count` empty untied tasks, using qthreads' qtimer for the
 * measurement and the project's YAML logging macros for output. */
#ifdef HAVE_CONFIG_H
# include "config.h" /* for _GNU_SOURCE */
#endif

#include <assert.h>
#include <stdio.h>

#include <omp.h>

#include <qthread/qthread.h>
#include <qthread/qtimer.h>

#define SILENT_ARGPARSING
#include "argparsing.h"
#include "log.h"

/* Empty task body: the benchmark measures pure spawn/join overhead. */
static aligned_t null_task(void *args_)
{
    return 0;
}

int main(int argc, char *argv[])
{
    uint64_t count = 1048576;     /* number of tasks; override via MT_COUNT */
    unsigned long threads = 1;    /* OpenMP team size, read inside the region */
    qtimer_t timer;
    double total_time = 0.0;

    CHECK_VERBOSE();
    NUMARG(count, "MT_COUNT");    /* read task count from the environment */
    assert(0 != count);

    /* One thread sets up the timer; the nested parallel-for spawns one
     * untied task per iteration, and taskwait joins them before the timer
     * is stopped. */
#pragma omp parallel
#pragma omp single
    {
        timer = qtimer_create();
        threads = omp_get_num_threads();

        qtimer_start(timer);
#pragma omp parallel for
        for (uint64_t i = 0; i < count; i++) {
#pragma omp task untied
            null_task(NULL);
        }
#pragma omp taskwait
        qtimer_stop(timer);
    }

    total_time = qtimer_secs(timer);

    qtimer_destroy(timer);

    /* Emit results in the project's YAML log format. */
    LOG_SPAWN_PARALLEL_YAML(count, total_time)
    LOG_ENV_OMP_YAML(threads)

    return 0;
}

/* vim:set expandtab */
task-5.c
/* { dg-do run } */
/* Runtime test for the OpenMP `final` clause and omp_in_final():
 * code outside any final task region must see omp_in_final() == 0, while
 * a task with final(1) and all of its descendants (even undeferred
 * if(0) tasks) must see omp_in_final() != 0.  Any violation sets err. */

#include <omp.h>
#include <stdlib.h>

int err;

int
main ()
{
  int e;
  #pragma omp parallel shared(err)
  {
    /* Not inside any final task region yet. */
    if (omp_in_final ())
      #pragma omp atomic write
      err = 1;
    /* Undeferred (if(0)) tasks do not create a final region either. */
    #pragma omp task if (0) shared(err)
    {
      if (omp_in_final ())
	#pragma omp atomic write
	err = 1;
      #pragma omp task if (0) shared(err)
      if (omp_in_final ())
	#pragma omp atomic write
	err = 1;
    }
    /* final(1): this task and every task created inside it are final. */
    #pragma omp task final (1) shared(err)
    {
      if (!omp_in_final ())
	#pragma omp atomic write
	err = 1;
      #pragma omp taskyield
      #pragma omp taskwait
      /* Child of a final task is included in the final region. */
      #pragma omp task shared(err)
      if (!omp_in_final ())
	#pragma omp atomic write
	err = 1;
    }
  }
  #pragma omp atomic read
  e = err;
  if (e)
    abort ();
  return 0;
}
kmeans_clustering.c
/*****************************************************************************/ /*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */ /*By downloading, copying, installing or using the software you agree */ /*to this license. If you do not agree to this license, do not download, */ /*install, copy or use the software. */ /* */ /* */ /*Copyright (c) 2005 Northwestern University */ /*All rights reserved. */ /*Redistribution of the software in source and binary forms, */ /*with or without modification, is permitted provided that the */ /*following conditions are met: */ /* */ /*1 Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* */ /*2 Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in the */ /* documentation and/or other materials provided with the distribution.*/ /* */ /*3 Neither the name of Northwestern University nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* */ /*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */ /*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ /*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */ /*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL */ /*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */ /*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */ /*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */ /*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */ /*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /*POSSIBILITY OF SUCH DAMAGE. */ /******************************************************************************/ /*************************************************************************/ /** File: kmeans_clustering.c **/ /** Description: Implementation of regular k-means clustering **/ /** algorithm **/ /** Author: Wei-keng Liao **/ /** ECE Department, Northwestern University **/ /** email: wkliao@ece.northwestern.edu **/ /** **/ /** Edited by: Jay Pisharath **/ /** Northwestern University. **/ /** **/ /** ================================================================ **/ /** **/ /** Edited by: Sang-Ha Lee **/ /** University of Virginia **/ /** **/ /** Description: No longer supports fuzzy c-means clustering; **/ /** only regular k-means clustering. **/ /** Simplified for main functionality: regular k-means **/ /** clustering. 
**/
/**                                                                     **/
/*************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include "kmeans.h"
#include "openacc.h"

#define RANDOM_MAX 2147483647

#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif

/* Compile-time problem size and loop-unrolling factor; _NTHREADS is the
 * number of OpenACC gang/worker iterations (points per thread =
 * _UNROLLFAC_). */
#ifndef _NPOINTS
#define _NPOINTS 819200
#endif
#ifndef _UNROLLFAC_
#define _UNROLLFAC_ 100
#endif
#define _NTHREADS (_NPOINTS/_UNROLLFAC_)

/* Mirror the chosen constants into OpenARC's translator namespace. */
#ifdef _OPENARC_
#if _NPOINTS == 204800
#pragma openarc #define _NPOINTS 204800
#elif _NPOINTS == 494020
#pragma openarc #define _NPOINTS 494020
#elif _NPOINTS == 819200
#pragma openarc #define _NPOINTS 819200
#endif
#if _UNROLLFAC_ == 1
#pragma openarc #define _UNROLLFAC_ 1
#elif _UNROLLFAC_ == 2
#pragma openarc #define _UNROLLFAC_ 2
#elif _UNROLLFAC_ == 4
#pragma openarc #define _UNROLLFAC_ 4
#elif _UNROLLFAC_ == 5
#pragma openarc #define _UNROLLFAC_ 5
#elif _UNROLLFAC_ == 800
#pragma openarc #define _UNROLLFAC_ 800
#elif _UNROLLFAC_ == 10
#pragma openarc #define _UNROLLFAC_ 10
#elif _UNROLLFAC_ == 100
#pragma openarc #define _UNROLLFAC_ 100
#endif
#pragma openarc #define _NATTRIBUTES 34
#pragma openarc #define _NCLUSTERS 5
#pragma openarc #define _NTHREADS (_NPOINTS/_UNROLLFAC_)
#endif

extern double wtime(void);

/*----< kmeans_clustering() >---------------------------------------------*/
/* Regular k-means clustering on the GPU via OpenACC/OpenARC.  Iterates
 * assign-points / recompute-centers until fewer than `threshold` points
 * change membership or 500 iterations elapse.  Returns the cluster-center
 * array (allocated with acc_create_unified; ownership passes to caller). */
PAType kmeans_clustering(float feature[_NPOINTS][_NATTRIBUTES], /* in: [npoints][nfeatures] */
                         int nfeatures, int npoints, int nclusters,
                         float threshold,
                         int membership[_NPOINTS]) /* out: [npoints] */
{
    int i, j, k, n = 0, index, loop = 0;
    int *new_centers_len; /* [nclusters]: no. of points in each cluster */
    float (*new_centers)[_NATTRIBUTES]; /* [nclusters][nfeatures] */
    float (*clusters)[_NATTRIBUTES];    /* out: [nclusters][nfeatures] */
    float delta;   /* number of points whose membership changed this pass */
    double timing;
    int nthreads;
    //int (*partial_new_centers_len)[_NCLUSTERS];
    //float (*partial_new_centers)[_NCLUSTERS][_NATTRIBUTES];

    /////////////////////////////////////////////
    // Added for inlining find_nearest_point() //
    /////////////////////////////////////////////
    int index_fnp, i_fnp;
    float max_dist = FLT_MAX;   /* tracks the current MINIMUM distance */
    int i_ed;
    ///////////////////////////////////////////////
    // Added for unrolling of the parallel loop. //
    ///////////////////////////////////////////////
    int tid, ii;

    nthreads = npoints / _UNROLLFAC_;

    /* allocate space for returning variable clusters[] */
    //clusters = (float (*)[_NATTRIBUTES]) malloc(nclusters * nfeatures * sizeof(float));
    clusters = (float (*)[_NATTRIBUTES]) acc_create_unified(NULL, nclusters * nfeatures * sizeof(float));

    /* deterministically pick the first nclusters points as initial centers
     * (the rand() version is commented out) */
    for (i = 0; i < nclusters; i++) {
        //n = (int)rand() % npoints;
        for (j = 0; j < nfeatures; j++)
            clusters[i][j] = feature[n][j];
        n++;
    }

    for (i = 0; i < npoints; i++)
        membership[i] = -1;

    /* need to initialize new_centers_len and new_centers[0] to all 0 */
    new_centers_len = (int*) calloc(nclusters, sizeof(int));
    /* NOTE(review): allocated as nclusters*nfeatures floats but indexed
     * with row stride _NATTRIBUTES — only safe if nfeatures ==
     * _NATTRIBUTES; confirm against the caller. */
    new_centers = (float (*)[_NATTRIBUTES]) calloc(nclusters * nfeatures, sizeof(float));
    //partial_new_centers_len = (int (*)[_NCLUSTERS]) calloc(nthreads*nclusters, sizeof(int));
    //partial_new_centers =(float (*)[_NCLUSTERS][_NATTRIBUTES]) calloc(nthreads*nclusters*nfeatures, sizeof(float));

    printf("num of threads = %d\n", nthreads);
#pragma acc data copyin (feature[0:_NPOINTS][0:_NATTRIBUTES], membership[0:_NPOINTS]) create(clusters[0:_NCLUSTERS][0:_NATTRIBUTES])
    do {
        delta = 0.0F;
#pragma acc update device(clusters)
        /* NOTE(review): delta is updated by every gang/worker but is not
         * listed in the reduction clause — looks like a data race unless
         * OpenARC handles it implicitly; confirm. */
#pragma acc kernels loop gang worker independent \
        private(i, index, index_fnp, max_dist) \
        reduction(+:new_centers[0:_NCLUSTERS][0:_NATTRIBUTES],new_centers_len[0:_NCLUSTERS])
#pragma openarc cuda sharedRW(new_centers_len)
        for (tid = 0; tid < nthreads; tid++) {
#pragma acc loop seq
            /* each thread handles _UNROLLFAC_ strided points */
            for (ii = 0; ii < _UNROLLFAC_; ii++) {
                i = tid + ii * nthreads;
                /* find the index of the nearest cluster center
                 * (find_nearest_point() inlined below) */
                //index = find_nearest_point(feature[i],
                //                           nfeatures,
                //                           clusters,
                //                           nclusters);
                max_dist = FLT_MAX;
                /* find the cluster center id with min distance to pt */
                for (i_fnp = 0; i_fnp < nclusters; i_fnp++) {
                    float dist;
                    //dist = euclid_dist_2(feature[i_fnp], clusters[i_fnp], nfeatures); /* no need square root */
                    dist = 0.0F;
                    for (i_ed = 0; i_ed < nfeatures; i_ed++)
                        dist += (feature[i][i_ed]-clusters[i_fnp][i_ed]) * (feature[i][i_ed]-clusters[i_fnp][i_ed]);
                    if (dist < max_dist) {
                        max_dist = dist;
                        index_fnp = i_fnp;
                    }
                }
                index = index_fnp;

                /* if membership changes, increase delta by 1 */
                if (membership[i] != index) delta += 1.0F;

                /* assign the membership to object i */
                membership[i] = index;

                /* update new cluster centers : sum of all objects located
                   within */
                new_centers_len[index]++;
                for (j = 0; j < nfeatures; j++)
                    new_centers[index][j] += feature[i][j];
            }
        } /* end of #pragma omp parallel for */

        /* replace old cluster centers with new_centers */
        for (i = 0; i < nclusters; i++) {
            for (j = 0; j < nfeatures; j++) {
                if (new_centers_len[i] > 0)
                    clusters[i][j] = new_centers[i][j] / new_centers_len[i];
                new_centers[i][j] = 0.0F;  /* set back to 0 */
            }
            new_centers_len[i] = 0;        /* set back to 0 */
        }
    } while (delta > threshold && loop++ < 500);

    printf("loop count: %d\n", loop);
    free(new_centers);
    free(new_centers_len);

    return clusters;
}
lastprivate-conditional-1.c
/* GCC OpenMP (DejaGnu) compile-only testcase: checks that the C front end
   rejects the 'conditional' modifier on 'lastprivate' where the spec forbids
   it -- on 'distribute' and 'taskloop' constructs and on non-scalar list
   items.  Each dg-error comment pins the diagnostic expected for the pragma
   on that same physical line, so the pragma and its directive must stay on
   one line.  */
void foo (int *p)
{
  int a = -1, b = -1, c = -1, d = -1, e = -1, f = -1, g = -1, h = -1;
  int i;
  /* Conditional lastprivate on the four distribute variants: all rejected.  */
  #pragma omp teams
  {
    #pragma omp distribute lastprivate (conditional: a) /* { dg-error "conditional 'lastprivate' clause on 'distribute' construct" } */
    for (i = 0; i < 32; i++)
      if (p[i])
        a = i;
    #pragma omp distribute simd lastprivate (conditional: b) /* { dg-error "conditional 'lastprivate' clause on 'distribute' construct" } */
    for (i = 0; i < 32; i++)
      if (p[i])
        b = i;
    #pragma omp distribute parallel for lastprivate (conditional: c) /* { dg-error "conditional 'lastprivate' clause on 'distribute' construct" } */
    for (i = 0; i < 32; i++)
      if (p[i])
        c = i;
    #pragma omp distribute parallel for simd lastprivate (conditional: d) /* { dg-error "conditional 'lastprivate' clause on 'distribute' construct" } */
    for (i = 0; i < 32; i++)
      if (p[i])
        d = i;
  }
  /* Combined construct containing distribute: still rejected.  */
  #pragma omp teams distribute parallel for lastprivate (conditional: e) /* { dg-error "conditional 'lastprivate' clause on 'distribute' construct" } */
  for (i = 0; i < 32; i++)
    if (p[i])
      e = i;
  /* Conditional lastprivate on taskloop variants: also rejected.  */
  #pragma omp parallel
  {
    #pragma omp master
    #pragma omp taskloop lastprivate (conditional: f) /* { dg-error "conditional 'lastprivate' clause on 'taskloop' construct" } */
    for (i = 0; i < 32; i++)
      if (p[i])
        f = i;
    #pragma omp master taskloop simd lastprivate (conditional: g) /* { dg-error "conditional 'lastprivate' clause on 'taskloop' construct" } */
    for (i = 0; i < 32; i++)
      if (p[i])
        g = i;
  }
  #pragma omp parallel master taskloop simd lastprivate (conditional: h) /* { dg-error "conditional 'lastprivate' clause on 'taskloop' construct" } */
  for (i = 0; i < 32; i++)
    if (p[i])
      h = i;
}

/* Aggregate type used below as a non-scalar lastprivate list item.  */
struct S { int a, b; };

void bar (int *p)
{
  struct S s = { -1, -1 }, t = { 1, 2 };
  int i;
  /* 'conditional' is only valid for scalar list items; a struct must be
     diagnosed.  */
  #pragma omp parallel for lastprivate (conditional: s) /* { dg-error "non-scalar variable 's' in conditional 'lastprivate' clause" } */
  for (i = 0; i < 32; i++)
    if (p[i])
      {
        struct S u = t;
        u.b = i;
        s = u;
      }
}

/* { dg-prune-output "not supported yet" } */
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda/utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ case mshadow::kBfloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_BFLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBfloat16: \ LOG(FATAL) << "This operation does not " \ "support bfloat16"; \ break; \ case mshadow::kInt8: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef 
uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float 
DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define 
MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/
// Flatten coordinate `coord` into a row-major linear index for a tensor of
// extents `shape` (last axis varies fastest).
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // The boolean factor (shape[i] > coord[i]) drops the coordinate's
    // contribution when coord[i] >= shape[i] -- presumably so a size-1
    // (broadcast) axis ignores the incoming coordinate.  TODO(review):
    // confirm this intent against callers.
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
// Inverse of ravel(): recover per-axis coordinates of row-major flat index
// `idx` for a tensor of extents `shape`.
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  // Peel axes from fastest-varying (ndim-1) to slowest, taking the
  // remainder modulo each extent.
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // j % shape[i], reusing the quotient
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
// Inner product of a coordinate vector with a stride vector, i.e. the
// element offset of `coord` under a strided layout.
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot */
// Fused unravel(idx, shape) followed by dot(., stride): maps a row-major
// flat index over `shape` to the offset under `stride` without
// materializing the intermediate coordinate vector.
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
                                    const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];  // (j % shape[i]) * stride[i]
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape */
// Row-major strides for `shape`; any axis of extent <= 1 gets stride 0, so
// indexing along it is a no-op (broadcast-friendly layout).
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates */
// Advance `coord` to the next position in row-major order, carrying
// overflow into slower axes.  Returns true while the coordinate is still in
// bounds (iteration has not wrapped past the end of axis 0).
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index */
// Same carry logic as inc() above, but additionally keeps `*idx` -- the
// offset of `coord` under `stride` -- in sync incrementally, avoiding a
// full dot product per step.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // Undo the shape[i] steps taken along axis i, take one step on axis i-1.
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index */
// Variant tracking two offsets at once (e.g. an input and an output tensor
// iterated in lockstep with different strides).
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
// Requires equal element counts and matching device masks (CHECK_EQ
// aborts otherwise).  When the dtypes differ, the source is cast
// element-wise via mshadow::expr::tcast into the destination's type.
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
            mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
\brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; template<typename OP, int req> struct mixed_type_unary_op { typedef OP Operation; /*! \brief input is one tensor */ template<typename OType, typename IType> MSHADOW_XINLINE static void Map(index_t i, OType *out, const IType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(OType(in[i]))); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! 
\brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! \brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } /*! \brief input is a tensor and the output is a boolean tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and two scalar value with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! 
\brief input is two tensors with different type and with a boolean output tensor */ template<typename LType, typename RType, typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const LType *lhs, const RType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_same<DType, float>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! 
\brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! \brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_same<DType, float>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is a tensor and a scalar value with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! 
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... 
args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<bool val> struct set_to_bool : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to true and false */ using set_true = set_to_bool<true>; using set_false = set_to_bool<false>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
local_temperature_average_response_function.h
// KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Jordi Cotela
//

#ifndef KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
#define KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED

#include "includes/kratos_flags.h"
#include "includes/model_part.h"
#include "utilities/variable_utils.h"

#include "response_functions/adjoint_response_function.h"

namespace Kratos
{

///@addtogroup ConvectionDiffusionApplication
///@{

///@name Kratos Classes
///@{

/// Adjoint response function: average of the nodal TEMPERATURE over a target
/// (sub-)model part, J = (1/N) * sum_i T_i.
/// Nodes of the target part are tagged with the STRUCTURE flag in the
/// constructor, and each tagged node stores how many elements touch it
/// (NUMBER_OF_NEIGHBOUR_ELEMENTS) so that per-element gradient contributions
/// can be scaled to sum to 1/N overall.
class LocalTemperatureAverageResponseFunction: public AdjointResponseFunction
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION(LocalTemperatureAverageResponseFunction);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /// @param Settings  must contain "model_part_name": the target part
    ///                  (rModelPart itself or one of its sub model parts).
    /// @param rModelPart root model part the adjoint problem is solved on.
    LocalTemperatureAverageResponseFunction(Parameters Settings, ModelPart& rModelPart)
    {
        KRATOS_TRY;

        mTargetModelPartName = Settings["model_part_name"].GetString();
        auto& r_target_model_part = GetTargetModelPart(rModelPart, mTargetModelPartName);
        auto& r_nodes = r_target_model_part.Nodes();
        // NOTE(review): r_nodes.size() counts all nodes of the target part on
        // this rank; after the SumAll below, ghost/interface nodes shared by
        // several ranks appear to be counted once per rank, whereas
        // CalculateValue() sums LocalMesh() node counts only -- TODO confirm
        // the two normalizations are meant to agree in MPI runs.
        mNumNodes = r_nodes.size();
        VariableUtils variable_utils;
        variable_utils.SetFlag(STRUCTURE,true,r_nodes);

        // Note: this should not be parallel, the operation is not threadsafe if the variable is uninitialized
        for (auto& r_node : r_nodes)
        {
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS,0);
        }

        mNumNodes = rModelPart.GetCommunicator().GetDataCommunicator().SumAll(mNumNodes);

        // Count, for every STRUCTURE-flagged node, how many elements of the
        // *root* model part contain it. Per-node locks guard the concurrent
        // increments from different elements.
        auto& r_elements = rModelPart.Elements();
        const int num_elements = r_elements.size();
        #pragma omp parallel for
        for (int i = 0; i < num_elements; i++)
        {
            auto i_elem = r_elements.begin() + i;
            auto& r_geom = i_elem->GetGeometry();
            // NOTE(review): inner index shadows the outer OpenMP index 'i';
            // legal, but worth renaming if this file is ever touched again.
            for (unsigned int i = 0; i < r_geom.PointsNumber(); i++)
            {
                auto& r_node = r_geom[i];
                if (r_node.Is(STRUCTURE))
                {
                    r_node.SetLock();
                    r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS) += 1;
                    r_node.UnSetLock();
                }
            }
        }

        // Accumulate neighbour counts across MPI partitions.
        rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);

        KRATOS_CATCH("");
    }

    /// Destructor.
    ~LocalTemperatureAverageResponseFunction() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// No per-step initialization is required for this response function.
    void Initialize() override
    {
        KRATOS_TRY;
        KRATOS_CATCH("");
    }

    /// dJ/dT contribution of an element: 1/(num_neighbour_elements * N) on
    /// each of its STRUCTURE-flagged nodes (see private helper).
    void CalculateGradient(const Element& rAdjointElement,
                           const Matrix& rResidualGradient,
                           Vector& rResponseGradient,
                           const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// Conditions do not contribute to this response: gradient is zero.
    void CalculateGradient(const Condition& rAdjointCondition,
                           const Matrix& rResidualGradient,
                           Vector& rResponseGradient,
                           const ProcessInfo& rProcessInfo) override
    {
        noalias(rResponseGradient) = ZeroVector(rResidualGradient.size1());
    }

    /// First time-derivative gradient; same nodal contribution as CalculateGradient.
    void CalculateFirstDerivativesGradient(const Element& rAdjointElement,
                                           const Matrix& rResidualGradient,
                                           Vector& rResponseGradient,
                                           const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// Second time-derivative gradient; same nodal contribution as CalculateGradient.
    void CalculateSecondDerivativesGradient(const Element& rAdjointElement,
                                            const Matrix& rResidualGradient,
                                            Vector& rResponseGradient,
                                            const ProcessInfo& rProcessInfo) override
    {
        ComputePointTemperatureSensitivityContribution(rResidualGradient, rAdjointElement.GetGeometry().Points(),rResponseGradient);
    }

    /// Shape (design-variable) sensitivity of an element: identically zero.
    void CalculatePartialSensitivity(Element& rAdjointElement,
                                     const Variable<array_1d<double, 3>>& rVariable,
                                     const Matrix& rSensitivityMatrix,
                                     Vector& rSensitivityGradient,
                                     const ProcessInfo& rProcessInfo) override
    {
        if (rSensitivityGradient.size() != rSensitivityMatrix.size1())
            rSensitivityGradient.resize(rSensitivityMatrix.size1(), false);
        noalias(rSensitivityGradient) = ZeroVector(rSensitivityMatrix.size1());
    }

    /// Shape (design-variable) sensitivity of a condition: identically zero.
    void CalculatePartialSensitivity(Condition& rAdjointElement,
                                     const Variable<array_1d<double, 3>>& rVariable,
                                     const Matrix& rSensitivityMatrix,
                                     Vector& rSensitivityGradient,
                                     const ProcessInfo& rProcessInfo) override
    {
        if (rSensitivityGradient.size() != rSensitivityMatrix.size1())
            rSensitivityGradient.resize(rSensitivityMatrix.size1(), false);
        noalias(rSensitivityGradient) = ZeroVector(rSensitivityMatrix.size1());
    }

    /// Evaluate J = sum(TEMPERATURE over target part) / (global local-mesh
    /// node count). MPI-collective: sums both numerator and denominator
    /// across ranks.
    double CalculateValue(ModelPart& rModelPart) override
    {
        KRATOS_TRY;

        const ModelPart& r_target_model_part = GetTargetModelPart(rModelPart, mTargetModelPartName);
        const double domain_aggregated_temperature = VariableUtils().SumHistoricalVariable<double>(TEMPERATURE, r_target_model_part);
        const Communicator& r_communicator = r_target_model_part.GetCommunicator();
        const int number_of_nodes = r_communicator.LocalMesh().NumberOfNodes();
        const int total_nodes = r_communicator.GetDataCommunicator().SumAll(number_of_nodes);
        return domain_aggregated_temperature / static_cast<double>(total_nodes);

        KRATOS_CATCH("");
    }

    ///@}

protected:
    ///@name Protected member Variables
    ///@{
    ///@}
    ///@name Protected Operators
    ///@{
    ///@}
    ///@name Protected Operations
    ///@{
    ///@}

private:
    ///@name Member Variables
    ///@{

    // Total node count of the target model part (summed over MPI ranks in
    // the constructor); denominator of the average.
    int mNumNodes = 0;
    // Name used to locate the target (sub-)model part inside the root part.
    std::string mTargetModelPartName;

    ///@}
    ///@name Private Operators
    ///@{

    /// Fill the local gradient vector: entry i is
    /// 1 / (NUMBER_OF_NEIGHBOUR_ELEMENTS(node_i) * mNumNodes) for
    /// STRUCTURE-flagged nodes, 0 otherwise. Summed over all elements this
    /// yields dJ/dT_i = 1/mNumNodes per flagged node.
    void ComputePointTemperatureSensitivityContribution(
        const Matrix& rDerivativesOfResidual,
        const Element::NodesArrayType& rNodes,
        Vector& rLocalSensitivityContribution) const
    {
        if (rLocalSensitivityContribution.size() != rDerivativesOfResidual.size1())
            rLocalSensitivityContribution.resize(rDerivativesOfResidual.size1(), false);
        noalias(rLocalSensitivityContribution) = ZeroVector(rLocalSensitivityContribution.size());

        const unsigned int num_nodes = rNodes.size();
        for (unsigned int i = 0; i < num_nodes; i++)
        {
            if (rNodes[i].Is(STRUCTURE))
            {
                double factor = 1.0 / (rNodes[i].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS)*mNumNodes);
                rLocalSensitivityContribution[i] = factor;
            }
        }
    }

    /// Resolve the target model part by name: the root itself or one of its
    /// sub model parts; throws if the name is unknown.
    ModelPart& GetTargetModelPart(ModelPart& rModelPart, const std::string& rTargetModelPartName)
    {
        KRATOS_TRY;
        if (rModelPart.Name() == rTargetModelPartName)
        {
            return rModelPart;
        }
        else if (rModelPart.HasSubModelPart(rTargetModelPartName))
        {
            return rModelPart.GetSubModelPart(rTargetModelPartName);
        }
        else
        {
            KRATOS_ERROR << "Unknown ModelPart " << rTargetModelPartName << "." << std::endl;
        }
        KRATOS_CATCH("")
        // Unreachable: KRATOS_ERROR throws; kept to silence missing-return warnings.
        return rModelPart;
    }

    ///@}
    ///@name Private Operations
    ///@{
    ///@}
};

///@} // Kratos Classes

///@} // ConvectionDiffusionApplication group

}

#endif // KRATOS_LOCAL_TEMPERATURE_AVERAGE_RESPONSE_FUNCTION_H_INCLUDED
GB_unaryop__lnot_uint64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_uint64_int32
// op(A') function: GB_tran__lnot_uint64_int32

// C type: uint64_t
// A type: int32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = !(Ax [p] != 0), cast int32_t -> uint64_t, for all
// anz entries, split statically over nthreads OpenMP threads. Aliasing of
// Cx and Ax is safe because each entry is read once before being written.
GrB_Info GB_unop__lnot_uint64_int32
(
    uint64_t *Cx, // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // Operator compiled out via GxB_NO_* controls; caller falls back to the
    // generic (typecasting) kernel.
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose + apply; the actual loop body lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB_tran__lnot_uint64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
vnc_fmt_plug.c
/* VNC cracker patch for JtR. Hacked together during March of 2012 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>
 *
 * On Windows, Use Ettercap to get VNC challenge-response pairs in
 * JtR format. E.g. ettercap -Tq -r /home/user/sample.pcap
 *
 * On other platforms, vncpcap2john.cpp should be able to parse
 * .pcap files and output VNC challenge-response pairs in JtR format
 *
 * bit_flip table and encryption algorithm are taken from VNCcrack.
 *
 * (C) 2003, 2004, 2006, 2008 Jack Lloyd <lloyd@randombit.net>
 * Licensed under the GNU GPL v2
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 * 02111-1307 USA. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_vnc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_vnc);
#else

#include <openssl/des.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE               1024 // tuned on core i7
#endif
#include "memdbg.h"

#define FORMAT_LABEL		"VNC"
#define FORMAT_NAME		""
#define ALGORITHM_NAME		"DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1
#define PLAINTEXT_LENGTH	8
#define BINARY_SIZE		16
#define SALT_SIZE		sizeof(struct custom_salt)
#define BINARY_ALIGN		sizeof(ARCH_WORD_32)
#define SALT_ALIGN		1
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1

/* Bit-reversal of each byte value, with DES odd parity already applied to
 * the result -- VNC feeds the password into DES with reversed bit order.
 * DES_set_odd_parity() already applied */
static const unsigned char bit_flip[256] = {
	0x01, 0x80, 0x40, 0xC1, 0x20, 0xA1, 0x61, 0xE0, 0x10, 0x91, 0x51, 0xD0,
	0x31, 0xB0, 0x70, 0xF1, 0x08, 0x89, 0x49, 0xC8, 0x29, 0xA8, 0x68, 0xE9,
	0x19, 0x98, 0x58, 0xD9, 0x38, 0xB9, 0x79, 0xF8, 0x04, 0x85, 0x45, 0xC4,
	0x25, 0xA4, 0x64, 0xE5, 0x15, 0x94, 0x54, 0xD5, 0x34, 0xB5, 0x75, 0xF4,
	0x0D, 0x8C, 0x4C, 0xCD, 0x2C, 0xAD, 0x6D, 0xEC, 0x1C, 0x9D, 0x5D, 0xDC,
	0x3D, 0xBC, 0x7C, 0xFD, 0x02, 0x83, 0x43, 0xC2, 0x23, 0xA2, 0x62, 0xE3,
	0x13, 0x92, 0x52, 0xD3, 0x32, 0xB3, 0x73, 0xF2, 0x0B, 0x8A, 0x4A, 0xCB,
	0x2A, 0xAB, 0x6B, 0xEA, 0x1A, 0x9B, 0x5B, 0xDA, 0x3B, 0xBA, 0x7A, 0xFB,
	0x07, 0x86, 0x46, 0xC7, 0x26, 0xA7, 0x67, 0xE6, 0x16, 0x97, 0x57, 0xD6,
	0x37, 0xB6, 0x76, 0xF7, 0x0E, 0x8F, 0x4F, 0xCE, 0x2F, 0xAE, 0x6E, 0xEF,
	0x1F, 0x9E, 0x5E, 0xDF, 0x3E, 0xBF, 0x7F, 0xFE, 0x01, 0x80, 0x40, 0xC1,
	0x20, 0xA1, 0x61, 0xE0, 0x10, 0x91, 0x51, 0xD0, 0x31, 0xB0, 0x70, 0xF1,
	0x08, 0x89, 0x49, 0xC8, 0x29, 0xA8, 0x68, 0xE9, 0x19, 0x98, 0x58, 0xD9,
	0x38, 0xB9, 0x79, 0xF8, 0x04, 0x85, 0x45, 0xC4, 0x25, 0xA4, 0x64, 0xE5,
	0x15, 0x94, 0x54, 0xD5, 0x34, 0xB5, 0x75, 0xF4, 0x0D, 0x8C, 0x4C, 0xCD,
	0x2C, 0xAD, 0x6D, 0xEC, 0x1C, 0x9D, 0x5D, 0xDC, 0x3D, 0xBC, 0x7C, 0xFD,
	0x02, 0x83, 0x43, 0xC2, 0x23, 0xA2, 0x62, 0xE3, 0x13, 0x92, 0x52, 0xD3,
	0x32, 0xB3, 0x73, 0xF2, 0x0B, 0x8A, 0x4A, 0xCB, 0x2A, 0xAB, 0x6B, 0xEA,
	0x1A, 0x9B, 0x5B, 0xDA, 0x3B, 0xBA, 0x7A, 0xFB, 0x07, 0x86, 0x46, 0xC7,
	0x26, 0xA7, 0x67, 0xE6, 0x16, 0x97, 0x57, 0xD6, 0x37, 0xB6, 0x76, 0xF7,
	0x0E, 0x8F, 0x4F, 0xCE, 0x2F, 0xAE, 0x6E, 0xEF, 0x1F, 0x9E, 0x5E, 0xDF,
	0x3E, 0xBF, 0x7F, 0xFE
};

#ifdef VNC_DEBUG
/* Debug helper: hex-dump len bytes of str to stdout. */
static void print_hex(unsigned char *str, int len)
{
	int i;
	for (i = 0; i < len; ++i)
		printf("%02x", str[i]);
	printf("\n");
}
#endif

/* Self-test vectors: "$vnc$*<challenge hex>*<response hex>" -> password. */
static struct fmt_tests vnc_tests[] = {
	{"$vnc$*84076F040550EEA9341967633B5F3855*DD96D21781A70DA49443279975404DD0", "pass1234"},
	{"$vnc$*6EFF78767762AD104E52A2E15FDA3A1A*C448C3C4BA7218EBAC29FD6623E85BAC", "pass1234"},
	{"$vnc$*0805B790B58E967F2A350A0C99DE3881*AECB26FAEAAA62D79636A5934BAC1078", "Password"},
	{"$vnc$*ADDC021F444F999B8E27144C0DCE7389*AFAF1BB57588784333962A124668A2C6", "openwall"},
	{"$vnc$*1D03C57F2DFFCC72A5AE3AD559C9C3DB*547B7A6F36A154DB03A2575C6F2A4EC5", "openwall"},
	{NULL}
};

/* Per-hash data: the 16-byte server challenge and the 16-byte
 * DES-CBC(password, challenge) response captured on the wire. */
static struct custom_salt {
	char unsigned challenge[16];
	char unsigned response[16];
} *cur_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Allocate per-candidate buffers, scaled up for OpenMP. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* True if q consists solely of hex digits up to its terminating NUL. */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Validate "$vnc$*<32 hex>*<32 hex>" structure of a candidate hash line. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ptr, *ctcopy, *keeptr;

	if (strncmp(ciphertext, "$vnc$*", 6))
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;
	ctcopy += 6;	/* skip leading $vnc$* */
	if (!(ptr = strtok(ctcopy, "*")))
		goto error;
	if (strlen(ptr) != 32 || !ishex(ptr))
		goto error;
	if (!(ptr = strtok(NULL, "*")))
		goto error;
	if (strlen(ptr) != 32 || !ishex(ptr))
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}

/* Decode the two hex fields into a custom_salt (static buffer, copied by core). */
static void *get_salt(char *ciphertext)
{
	int i;
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *p, *keeptr = ctcopy;
	ctcopy += 6;	/* skip over "$vnc$*" */
	p = strtok(ctcopy, "*");
	for (i = 0; i < 16; i++)
		cs.challenge[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	for (i = 0; i < 16; i++)
		cs.response[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Decode the response field (last '*'-delimited token) into raw bytes. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash accessors over the first crypt_out word (standard JtR set). */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* For each candidate password: build the bit-flipped DES key and encrypt the
 * challenge; only if the first 8 bytes match the captured response is the
 * second block computed and the full 16-byte result stored.
 * NOTE(review): strlen() is re-evaluated each loop iteration in the key
 * setup below; the key is at most 8 bytes, so the cost is negligible. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		int i;
		DES_cblock des_key;
		DES_key_schedule schedule;
		DES_cblock ivec;
		unsigned char encrypted_challenge[16] = { 0 };
		/* process key */
		for(i = 0; i < strlen((const char*)saved_key[index]); i++)
			des_key[i] = bit_flip[ARCH_INDEX(saved_key[index][i])];
		memset(ivec, 0, 8);
		DES_set_key_unchecked(&des_key, &schedule);
		/* do encryption */
		DES_cbc_encrypt(cur_salt->challenge, &encrypted_challenge[0], 8, &schedule, &ivec, DES_ENCRYPT);
		if(memcmp(encrypted_challenge, cur_salt->response, 8) == 0) {
			DES_cbc_encrypt(&cur_salt->challenge[8], &encrypted_challenge[8], 8, &schedule, &ivec, DES_ENCRYPT);
			if(memcmp(encrypted_challenge, cur_salt->response, 16) == 0)
				memcpy((unsigned char*)crypt_out[index], encrypted_challenge, 16);
		}
	}
	return count;
}

/* Any candidate's full output equal to the target binary? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full comparison already done in crypt_all/cmp_one; nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate password, truncated to the VNC maximum of 8 bytes. */
static void vnc_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > 8)
		saved_key_length = 8;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format registration: wires the callbacks above into the JtR core. */
struct fmt_main fmt_vnc = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		vnc_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		vnc_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
mkl_functions-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file mkl_functions-inl.h
 * \brief Wrapper for MKL VML functions
 * \author Tao Lv, Shufan Wu
 */
#ifndef MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_
#define MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_

#if MSHADOW_USE_MKL == 1
#include "mkl_vml.h"

namespace mxnet {
namespace op {
namespace mkl_func {

// True when n fits into MKL_INT (VML sizes are MKL_INT, which may be a
// 32-bit int depending on the MKL interface layer).
MSHADOW_XINLINE
static bool check_size(const size_t n) {
  const size_t MKL_INT_MAX = (sizeof(MKL_INT) == sizeof(int)) ? INT_MAX : LLONG_MAX;
  return (n <= MKL_INT_MAX);
}

// VML kernels exist only for single/double precision.
MSHADOW_XINLINE
static bool check_type(const int t) {
  return (t == mshadow::kFloat32 || t == mshadow::kFloat64);
}

// Defines struct `name` with float/double Vectorize() overloads that forward
// to the MKL VML functions vs<func> / vd<func> (token-pasted).
#define MXNET_MKL_UNARY_MATH_FUNC(name, func)                                            \
  struct name {                                                                          \
    MSHADOW_XINLINE static void Vectorize(const index_t n, const float* src, float* dst) { \
      vs##func(static_cast<MKL_INT>(n), src, dst);                                       \
    }                                                                                    \
    MSHADOW_XINLINE static void Vectorize(const index_t n, const double* src, double* dst) { \
      vd##func(static_cast<MKL_INT>(n), src, dst);                                       \
    }                                                                                    \
  };

// Same as above for element-wise binary VML functions: c = func(a, b).
#define MXNET_MKL_BINARY_MATH_FUNC(name, func)                        \
  struct name {                                                       \
    MSHADOW_XINLINE static void Vectorize(const index_t n,            \
                                          const float* a,             \
                                          const float* b,             \
                                          float* c) {                 \
      vs##func(static_cast<MKL_INT>(n), a, b, c);                     \
    }                                                                 \
    MSHADOW_XINLINE static void Vectorize(const index_t n,            \
                                          const double* a,            \
                                          const double* b,            \
                                          double* c) {                \
      vd##func(static_cast<MKL_INT>(n), a, b, c);                     \
    }                                                                 \
  };

MXNET_MKL_UNARY_MATH_FUNC(erf, Erf);
MXNET_MKL_UNARY_MATH_FUNC(exp, Exp);
MXNET_MKL_UNARY_MATH_FUNC(exp2, Exp2);
MXNET_MKL_UNARY_MATH_FUNC(exp10, Exp10);
MXNET_MKL_UNARY_MATH_FUNC(expm1, Expm1);
MXNET_MKL_UNARY_MATH_FUNC(log, Ln);
MXNET_MKL_UNARY_MATH_FUNC(log2, Log2);
MXNET_MKL_UNARY_MATH_FUNC(log10, Log10);
MXNET_MKL_UNARY_MATH_FUNC(log1p, Log1p);

MXNET_MKL_UNARY_MATH_FUNC(sin, Sin);
MXNET_MKL_UNARY_MATH_FUNC(cos, Cos);
MXNET_MKL_UNARY_MATH_FUNC(tan, Tan);
MXNET_MKL_UNARY_MATH_FUNC(asin, Asin);
MXNET_MKL_UNARY_MATH_FUNC(acos, Acos);
MXNET_MKL_UNARY_MATH_FUNC(atan, Atan);

MXNET_MKL_UNARY_MATH_FUNC(sinh, Sinh);
MXNET_MKL_UNARY_MATH_FUNC(cosh, Cosh);
MXNET_MKL_UNARY_MATH_FUNC(tanh, Tanh);
MXNET_MKL_UNARY_MATH_FUNC(asinh, Asinh);
MXNET_MKL_UNARY_MATH_FUNC(acosh, Acosh);
MXNET_MKL_UNARY_MATH_FUNC(atanh, Atanh);

MXNET_MKL_UNARY_MATH_FUNC(sqrt, Sqrt);
MXNET_MKL_UNARY_MATH_FUNC(abs, Abs);
MXNET_MKL_UNARY_MATH_FUNC(cbrt, Cbrt);
MXNET_MKL_UNARY_MATH_FUNC(round, Round);
MXNET_MKL_UNARY_MATH_FUNC(ceil, Ceil);
MXNET_MKL_UNARY_MATH_FUNC(floor, Floor);
MXNET_MKL_UNARY_MATH_FUNC(trunc, Trunc);

MXNET_MKL_UNARY_MATH_FUNC(lgamma, LGamma);
MXNET_MKL_UNARY_MATH_FUNC(tgamma, TGamma);
MXNET_MKL_UNARY_MATH_FUNC(square, Sqr);

MXNET_MKL_BINARY_MATH_FUNC(add, Add);
MXNET_MKL_BINARY_MATH_FUNC(sub, Sub);
MXNET_MKL_BINARY_MATH_FUNC(mul, Mul);
MXNET_MKL_BINARY_MATH_FUNC(pow, Pow);
MXNET_MKL_BINARY_MATH_FUNC(hypot, Hypot);

// Serial reduction: dst[0] = sum of in[0..n-1].
template <typename DType>
MSHADOW_XINLINE static void sum_(index_t n, DType* in, DType* dst) {
  DType sum = 0.0f;
  for (index_t i = 0; i < n; i++)
    sum += in[i];

  dst[0] = sum;
}

// LayerNorm on the last dimension
// Normalizes each of the m rows of the (m, n) input a into b:
//   b[i][j] = (a[i][j] - mean_i) * gamma[j] / std_i + beta[j]
// NOTE(review): despite the name, var[i] stores sqrt(variance + eps), i.e.
// the (eps-stabilized) standard deviation, not the variance.
template <typename DType>
MSHADOW_XINLINE static void LayerNormLastDim(index_t m,
                                             index_t n,
                                             DType* a,
                                             DType* b,
                                             DType* gamma,
                                             DType* beta,
                                             DType* mean,
                                             DType* var,
                                             DType eps) {
  auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(nthreads)
  for (index_t i = 0; i < m; i++) {
    DType* in_offset = a + i * n;
    DType* out_offset = b + i * n;
    DType x_sum = 0.0f;
    DType x_square_sum = 0.0f;
    // E[x] and E[x^2] in one pass; variance = E[x^2] - E[x]^2.
#if !defined(_MSC_VER)
#pragma omp simd
#endif
    for (index_t j = 0; j < n; j++) {
      x_sum += in_offset[j];
      x_square_sum += in_offset[j] * in_offset[j];
    }
    mean[i] = x_sum / n;
    var[i] = math::sqrt(x_square_sum / n - mean[i] * mean[i] + eps);
#if !defined(_MSC_VER)
#pragma omp simd
#endif
    for (index_t j = 0; j < n; j++) {
      out_offset[j] = (in_offset[j] - mean[i]) * gamma[j] / var[i] + beta[j];
    }
  }
}

}  // namespace mkl_func
}  // namespace op
}  // namespace mxnet
#endif  // MSHADOW_USE_MKL == 1
#endif  // MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_
random_draw.c
/*
Copyright (c) 2015, Intel Corporation

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

* Redistributions of source code must retain the above
      copyright notice, this list of conditions and the following
      disclaimer.
* Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.
* Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products
      derived from this software without specific prior written
      permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

/**********************************************************************

Name:      LCG

Purpose:   Provide a mixed Linear Congruential Generator of
           pseudo-random numbers with a period of 2^64, plus tools
           to jump ahead in a sequence of such generated numbers.
           For details, see individual functions.

Functions: LCG_next:      a new pseudo-randon number
           LCG_get_chunk: return subset of an interval of natural
                          numbers
           LCG_init:      initialize the generator
           LCG_jump:      jump ahead into a sequence of pseudo-random
                          numbers
           random_draw:

Notes:     LCG_init must be called by each thread or rank before any
           jump into a sequence of pseudo-random numbers is made

History:   Written by Rob Van der Wijngaart, December 2015

**********************************************************************/

#include <par-res-kern_general.h>
#include <math.h>
#include <stdint.h>
#include <inttypes.h>
#include <limits.h>
#include <random_draw.h>

/* Knuth's MMIX multiplier/increment; full period 2^64 over the 64-bit
   state space                                                          */
static uint64_t LCG_a = 6364136223846793005;
static uint64_t LCG_c = 1442695040888963407;
static uint64_t LCG_seed_init = 27182818285; //used to (re)set seed

/* FIX(review): the original code contained
       #ifdef __OPENMP
       #pragma omp threadprivate (LCG_seed, LCG_A)
       #endif
   The guard misspells the standard OpenMP macro (_OPENMP), so the pragma
   was always compiled out -- fortunately, because LCG_seed and LCG_A are
   no longer file-scope variables; all generator state now lives in the
   caller-owned random_draw_t. The stale block has been removed: per-thread
   state is obtained simply by giving each thread/rank its own
   random_draw_t (see LCG_init/LCG_jump).                                */

/* for a range of 0 to size-1, find the inclusive chunk [start,end]
   assigned to calling thread tid out of nthreads; the first
   (size mod nthreads) threads receive one extra element each           */
void LCG_get_chunk(uint64_t *start, uint64_t *end, int tid, int nthreads, uint64_t size)
{
  uint64_t chunk, remainder;
  chunk = size/nthreads;
  remainder = size - chunk*nthreads;

  if ((uint64_t)tid < remainder) {
    *start = tid*(chunk+1);
    *end   = *start + chunk;
  }
  else {
    *start = remainder*(chunk+1) + (tid-remainder)*chunk;
    *end   = *start + chunk - 1;
  }
  return;
}

/* strip the most significant set bit: tail(x) = x - 2^floor(log2(x));
   tail(0) = 0                                                          */
static uint64_t tail(uint64_t x)
{
  uint64_t x2 = x;
  if (!x) return x;
  uint64_t result = 1;
  while (x>>=1) result <<= 1;
  return (x2 - result);
}

/* Sum(i=1,2^k) a^i, built recursively from the table of repeated
   squares parm->LCG_A[j] = a^(2^j) prepared by LCG_init               */
static uint64_t SUMPOWER(int k, random_draw_t *parm)
{
  if (!k) return LCG_a;
  return SUMPOWER(k-1, parm)*(1+parm->LCG_A[k-1]);
}

/* floor(log2(n)) for n > 0 */
static int LOG(uint64_t n)
{
  int result = 0;
  while (n>>=1) result++;
  return (result);
}

/* Sum(i=1,n) a^i, with n arbitrary (all arithmetic mod 2^64) */
static uint64_t SUMK(uint64_t n, random_draw_t *parm)
{
  if (n==0) return (0);
  uint64_t HEAD  = SUMPOWER(LOG(n), parm);
  uint64_t TAILn = tail(n);
  if (TAILn==0) return (HEAD);
  return (HEAD + (parm->LCG_A[LOG(n)])*SUMK(TAILn, parm));
}

/* advance the generator one step and return a value in [0, bound) */
uint64_t LCG_next(uint64_t bound, random_draw_t *parm)
{
  parm->LCG_seed = LCG_a*parm->LCG_seed + LCG_c;
  return (parm->LCG_seed%bound);
}

/* (re)set the seed and precompute LCG_A[i] = a^(2^i); must be called
   by each thread or rank before any LCG_jump                          */
void LCG_init(random_draw_t *parm)
{
  int i;
  parm->LCG_seed = LCG_seed_init;
  parm->LCG_A[0] = LCG_a;
  for (i=1; i<NMAX; i++) {
    parm->LCG_A[i] = parm->LCG_A[i-1]*parm->LCG_A[i-1];
  }
  return;
}

/* reset the generator, then jump ahead m steps in O(log m) time using
   x_{n+m} = a^m x_n + (Sum(i=0,m-1) a^i) c                            */
void LCG_jump(uint64_t m, uint64_t bound, random_draw_t *parm)
{
  int      i, index, LCG_power[NMAX];
  uint64_t mm, s_part;

  for (i=0; i<NMAX; i++) LCG_power[i] = 0;

  parm->LCG_seed = LCG_seed_init;
  /* Catch two special cases */
  switch (m) {
  case 0: return;
  case 1: LCG_next(bound, parm); return;
  }

  /* decompose m into powers of two, so a^m = prod a^(2^i) over set bits */
  mm = m;
  index = 0;
  while (mm) {
    LCG_power[index++] = mm&1;
    mm >>= 1;
  }

  s_part = 1;
  for (i=0; i<index; i++)
    if (LCG_power[i]) s_part *= parm->LCG_A[i];
  parm->LCG_seed = s_part*parm->LCG_seed + (SUMK(m-1,parm)+1)*LCG_c;
  return;
}

/* draw a pseudo-random non-negative integer with expected value mu:
   for mu >= 1, a Box-Muller Gaussian with sigma = 0.15*mu (rounded);
   for mu < 1, a Bernoulli draw with success probability ~mu.
   Both branches consume exactly two LCG_next calls, so the stream
   stays aligned regardless of mu.

   FIX(review): the original declared z0/z1/numerator/i0/i1 as function
   statics although no value is ever reused across calls; the statics
   were a latent data race under threading and have been made ordinary
   locals. The unused sin-term (z1) and first-draw (i0) results are no
   longer stored.                                                      */
uint64_t random_draw(double mu, random_draw_t *parm)
{
  const double   two_pi      = 2.0*3.14159265358979323846;
  const uint64_t rand_max    = ULLONG_MAX;
  const double   rand_div    = 1.0/ULLONG_MAX;
  const uint64_t denominator = UINT_MAX;
  double u0, u1, sigma;

  if (mu >= 1.0) {
    /* set std dev equal to 15% of average; ensures result will never
       be negative (a negative draw would require > 6 sigma)           */
    sigma = mu*0.15;
    u0 = LCG_next(rand_max, parm) * rand_div;
    u1 = LCG_next(rand_max, parm) * rand_div;
    double z0 = sqrt(-2.0 * log(u0)) * cos(two_pi * u1);
    return (uint64_t) (z0 * sigma + mu + 0.5);
  }
  else {
    /* we need to pick two integers whose quotient approximates mu;
       set one to UINT_MAX                                             */
    uint64_t numerator = (uint32_t) (mu*(double)denominator);
    /* don't use value, but must call LCG_next twice */
    (void) LCG_next(denominator, parm);
    uint64_t i1 = LCG_next(denominator, parm);
    return ((uint64_t)(i1 <= numerator));
  }
}
snmg_test_utils.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Interanl helper functions // Author: Alex Fender afender@nvidia.com #pragma once #include <omp.h> #include "test_utils.h" #include <fstream> // std::ifstream // global to local offsets by shifting all offsets by the first offset value template <typename T> void shift_offsets(std::vector<T> & off_loc) { auto start = off_loc.front(); for (auto i = size_t{0}; i < off_loc.size(); ++i) off_loc[i] -= start; } // 1D partitioning such as each GPU has about the same number of edges template <typename T> void edge_partioning(std::vector<T> & off_h, std::vector<size_t> & part_offset, std::vector<size_t> & v_loc, std::vector<size_t> & e_loc) { auto i = omp_get_thread_num(); auto p = omp_get_num_threads(); //set first and last partition offsets part_offset[0] = 0; part_offset[p] = off_h.size()-1; if (i>0) { //get the first vertex ID of each partition auto loc_nnz = off_h.back()/p; auto start_nnz = i*loc_nnz; auto start_v = 0; for (auto j = size_t{0}; j < off_h.size(); ++j) { if (off_h[j] >= start_nnz) { start_v = j; break; } } part_offset[i] = start_v; } // all threads must know their partition offset #pragma omp barrier // Store the local number of V and E for convenience v_loc[i] = part_offset[i+1] - part_offset[i]; e_loc[i] = off_h[part_offset[i+1]] - off_h[part_offset[i]]; } // csv for HiBench template <typename idx_t> int read_single_file(std::string fileName, std::vector<idx_t>& s, 
// NOTE(review): this is the tail of an edge-list reader whose signature begins
// before this chunk; presumably it takes a file name plus source/destination
// vectors — confirm against the full file. Returns 1 on open failure, 0 on success.
std::vector<idx_t>& d) {
  s.clear();
  d.clear();
  std::ifstream f(fileName);
  if (!f) {
    return 1;  // could not open the input file
  }
  idx_t src, dst;
  // Read whitespace-separated (src, dst) pairs until extraction fails (EOF or bad token).
  while (f>>src>>dst) {
    s.push_back(src);
    d.push_back(dst);
  }
  f.close();
  return 0;
}

// Splits a COO edge list evenly across the OpenMP threads of the enclosing
// parallel region and materializes each thread's slice as gdf_column objects
// (presumably cuDF/cuGraph device columns — create_gdf_column is defined elsewhere).
// Must be called from inside an active parallel region: the thread id selects
// the slice. The value column is optional and only produced when both a value
// vector and an output column are supplied.
template<typename idx_t, typename val_t>
void load_coo_loc(std::vector<idx_t>& cooRow,
                  std::vector<idx_t>& cooCol,
                  std::vector<val_t>& cooVal,
                  gdf_column* cooRowLocal,
                  gdf_column* cooColLocal,
                  gdf_column* cooValLocal) {
  auto i = omp_get_thread_num();
  auto p = omp_get_num_threads();
  // startOffsets[j] is the first edge owned by thread j; the sentinel at [p]
  // gives the last thread any remainder edges (size may not divide evenly).
  std::vector<size_t> startOffsets(p + 1);
  startOffsets[p] = cooRow.size();
  size_t numRows = cooRow.size() / p;
  for (int j = 0; j < p; j++)
    startOffsets[j] = j * numRows;
  // Copy this thread's half-open slice [startOffsets[i], startOffsets[i+1]).
  std::vector<idx_t> cooRow_part(cooRow.begin() + startOffsets[i],
                                 cooRow.begin() + startOffsets[i + 1]);
  std::vector<idx_t> cooCol_part(cooCol.begin() + startOffsets[i],
                                 cooCol.begin() + startOffsets[i + 1]);
  create_gdf_column(cooRow_part, cooRowLocal);
  create_gdf_column(cooCol_part, cooColLocal);
  if (cooVal.size() > 0 && cooValLocal != nullptr) {
    std::vector<val_t> cooVal_part(cooVal.begin() + startOffsets[i],
                                   cooVal.begin() + startOffsets[i + 1]);
    create_gdf_column(cooVal_part, cooValLocal);
  }
}

// Partitions a host CSR matrix by vertex ranges across the OpenMP threads of
// the enclosing parallel region and builds each thread's local offset/index/value
// columns. edge_partioning (project helper, name as spelled there) fills
// part_offset (vertex range per thread), v_loc (local vertex counts) and
// e_loc (local edge counts). Must be called from inside a parallel region.
template <typename idx_t,typename val_t>
void load_csr_loc(std::vector<idx_t> & off_h,
                  std::vector<idx_t> & ind_h,
                  std::vector<val_t> & val_h,
                  std::vector<size_t> & v_loc,
                  std::vector<size_t> & e_loc,
                  std::vector<size_t> & part_offset,
                  gdf_column* col_off,
                  gdf_column* col_ind,
                  gdf_column* col_val) {
  auto i = omp_get_thread_num();
  auto p = omp_get_num_threads();
  edge_partioning(off_h, part_offset, v_loc, e_loc);
  // Sanity: this thread's vertex range matches its reported local vertex count.
  ASSERT_EQ(part_offset[i+1]-part_offset[i], v_loc[i]);
  // Local offsets keep one extra entry (CSR sentinel); indices/values cover
  // exactly the edges whose rows fall in this thread's vertex range.
  std::vector<idx_t> off_loc(off_h.begin()+part_offset[i], off_h.begin()+part_offset[i+1]+1),
                     ind_loc(ind_h.begin()+off_h[part_offset[i]],ind_h.begin()+off_h[part_offset[i+1]]);
  std::vector<val_t> val_loc(val_h.begin()+off_h[part_offset[i]],val_h.begin()+off_h[part_offset[i+1]]);
  ASSERT_EQ(off_loc.size(), v_loc[i]+1);
  ASSERT_EQ(ind_loc.size(), e_loc[i]);
  ASSERT_EQ(val_loc.size(), e_loc[i]);
#ifdef SNMG_VERBOSE
  // Debug dump of the partitioning; barriers keep the master's output from
  // interleaving with other threads' work.
#pragma omp barrier
#pragma omp master
  {
    std::cout << off_h[part_offset[i]]<< std::endl;
    std::cout << off_h[part_offset[i+1]]<< std::endl;
    for (auto j = part_offset.begin(); j != part_offset.end(); ++j)
      std::cout << *j << ' ';
    std::cout << std::endl;
    for (auto j = v_loc.begin(); j != v_loc.end(); ++j)
      std::cout << *j << ' ';
    std::cout << std::endl;
    for (auto j = e_loc.begin(); j != e_loc.end(); ++j)
      std::cout << *j << ' ';
    std::cout << std::endl;
  }
#pragma omp barrier
#endif
  // Rebase the local offsets so they start at zero (project helper).
  shift_offsets(off_loc);
  // After rebasing, the final offset must equal the local edge count.
  ASSERT_EQ(static_cast<size_t>(off_loc[part_offset[i+1]-part_offset[i]]),e_loc[i]);
  create_gdf_column(off_loc, col_off);
  ASSERT_EQ(off_loc.size(), static_cast<size_t>(col_off->size));
  create_gdf_column(ind_loc, col_ind);
  create_gdf_column(val_loc, col_val);
}

// Prints one message per thread in thread-id order. The barrier is hit by
// every thread on every iteration (it sits outside the if), so exactly one
// thread prints between consecutive barriers.
void serializeMessage(std::string message){
  auto i = omp_get_thread_num();
  auto p = omp_get_num_threads();
  for (int j = 0; j < p; j++){
    if (i == j)
      std::cout << "Thread " << i << ": " << message << "\n";
#pragma omp barrier
  }
}
atax.c
/** * atax.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.5 /* Problem size. */ #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define NX SIZE #define NY SIZE #ifndef M_PI #define M_PI 3.14159 #endif /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *x, DATA_TYPE *A) { int i, j; for (i = 0; i < NX; i++) { x[i] = i * M_PI; for (j = 0; j < NY; j++) { A[i * NY + j] = ((DATA_TYPE)i * (j)) / NX; } } } int compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < NY; i++) { if (percentDiff(z[i], z_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void atax_cpu(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < NY; i++) { y[i] = 0; } for (i = 0; i < NX; i++) { tmp[i] = 0; for (j = 0; j < NY; j++) { tmp[i] = tmp[i] + A[i * NY + j] * x[j]; } for (j = 0; j < NY; j++) { y[j] = y[j] + A[i * NY + j] * tmp[i]; } } } void atax_OMP(DATA_TYPE *A, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { for (int i = 0; i < NY; i++) { y[i] = 0; } #pragma omp target teams map(to : A[ : NX *NY], x[ : NY]) map(tofrom : tmp[ : NX], y[ : NY]) device(DEVICE_ID) 
{ #pragma omp distribute parallel for for (int i = 0; i < NX; i++) { tmp[i] = 0; for (int j = 0; j < NY; j++) { tmp[i] += A[i * NY + j] * x[j]; } } // Note that the Loop has been reversed #pragma omp distribute parallel for for (int j = 0; j < NY; j++) { for (int i = 0; i < NX; i++) { y[j] += A[i * NY + j] * tmp[i]; } } } } int main(int argc, char **argv) { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *x; DATA_TYPE *y; DATA_TYPE *y_outputFromGpu; DATA_TYPE *tmp; A = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE)); x = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE)); y = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE *)malloc(NY * sizeof(DATA_TYPE)); tmp = (DATA_TYPE *)malloc(NX * sizeof(DATA_TYPE)); fprintf(stdout, "<< Matrix Transpose and Vector Multiplication >>\n"); init_array(x, A); t_start = rtclock(); atax_OMP(A, x, y_outputFromGpu, tmp); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); atax_cpu(A, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(y, y_outputFromGpu); #endif free(A); free(x); free(y); free(y_outputFromGpu); free(tmp); return fail; }
phivector.h
#ifndef PHIVECTOR_H #define PHIVECTOR_H #include "../api/phigraph_aligned.h" #include <math.h> template <class Type> class PhiVector{ public: phiLong length; phiLong x; phiLong y; phiLong index; Type** array; PhiVector(phiLong dataNum){ length = dataNum; x = 64/sizeof(Type); y = ceil((phiDouble)dataNum/x); array = phimalloc(Type*,y); for(phiLong i = 0;i < y;i++){ array[i] = phimalloc(Type,x); } for(phiLong i = 0;i < y;i++){ for(phiLong j = 0;j < x;j++) array[i][j] = (Type)0; } // printf("x= %d,y=%d\n",x,y ); // printf("vector\n" ); index = 0; } void insert(Type value){ //printf("index=%d\n",index ); if(index < length){ phiLong x_temp = index/x; phiLong y_temp = index%x; array[x_temp][y_temp] = value; #pragma omp atmoic index++; } //printf("got22\n" ); } ~PhiVector(){ for(phiLong i=0;i < y; i++) free(array[i]); free(array); } }; #endif
ADM_constraint.h
void ADM_constraint(const paramstruct *restrict params, REAL *restrict xx[3], REAL *restrict in_gfs, REAL *restrict aux_gfs) { #include "set_Cparameters.h" #pragma omp parallel for for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++) { const REAL xx2 = xx[2][i2]; for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++) { const REAL xx1 = xx[1][i1]; for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0+=SIMD_width) { const REAL xx0 = xx[0][i0]; { /* * NRPy+ Finite Difference Code Generation, Step 1 of 1: Read from main memory and compute finite difference stencils: */ const REAL_SIMD_ARRAY hDD00 = ReadSIMD(&in_gfs[IDX4S(HDD00GF, i0,i1,i2)]); const REAL_SIMD_ARRAY hDD01 = ReadSIMD(&in_gfs[IDX4S(HDD01GF, i0,i1,i2)]); const REAL_SIMD_ARRAY hDD02 = ReadSIMD(&in_gfs[IDX4S(HDD02GF, i0,i1,i2)]); const REAL_SIMD_ARRAY hDD11 = ReadSIMD(&in_gfs[IDX4S(HDD11GF, i0,i1,i2)]); const REAL_SIMD_ARRAY hDD12 = ReadSIMD(&in_gfs[IDX4S(HDD12GF, i0,i1,i2)]); const REAL_SIMD_ARRAY hDD22 = ReadSIMD(&in_gfs[IDX4S(HDD22GF, i0,i1,i2)]); const REAL_SIMD_ARRAY aDD00 = ReadSIMD(&in_gfs[IDX4S(ADD00GF, i0,i1,i2)]); const REAL_SIMD_ARRAY aDD01 = ReadSIMD(&in_gfs[IDX4S(ADD01GF, i0,i1,i2)]); const REAL_SIMD_ARRAY aDD02 = ReadSIMD(&in_gfs[IDX4S(ADD02GF, i0,i1,i2)]); const REAL_SIMD_ARRAY aDD11 = ReadSIMD(&in_gfs[IDX4S(ADD11GF, i0,i1,i2)]); const REAL_SIMD_ARRAY aDD12 = ReadSIMD(&in_gfs[IDX4S(ADD12GF, i0,i1,i2)]); const REAL_SIMD_ARRAY aDD22 = ReadSIMD(&in_gfs[IDX4S(ADD22GF, i0,i1,i2)]); const REAL_SIMD_ARRAY trK = ReadSIMD(&in_gfs[IDX4S(TRKGF, i0,i1,i2)]); const REAL_SIMD_ARRAY cf = ReadSIMD(&in_gfs[IDX4S(CFGF, i0,i1,i2)]); const REAL_SIMD_ARRAY gammaDD00 = ReadSIMD(&in_gfs[IDX4S(GAMMADD00GF, i0,i1,i2)]); const REAL_SIMD_ARRAY gammaDD01 = ReadSIMD(&in_gfs[IDX4S(GAMMADD01GF, i0,i1,i2)]); const REAL_SIMD_ARRAY gammaDD02 = ReadSIMD(&in_gfs[IDX4S(GAMMADD02GF, i0,i1,i2)]); const REAL_SIMD_ARRAY gammaDD11 = ReadSIMD(&in_gfs[IDX4S(GAMMADD11GF, i0,i1,i2)]); const REAL_SIMD_ARRAY gammaDD12 = ReadSIMD(&in_gfs[IDX4S(GAMMADD12GF, i0,i1,i2)]); const 
REAL_SIMD_ARRAY gammaDD22 = ReadSIMD(&in_gfs[IDX4S(GAMMADD22GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUU00 = ReadSIMD(&in_gfs[IDX4S(KUU00GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUU01 = ReadSIMD(&in_gfs[IDX4S(KUU01GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUU02 = ReadSIMD(&in_gfs[IDX4S(KUU02GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUU11 = ReadSIMD(&in_gfs[IDX4S(KUU11GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUU12 = ReadSIMD(&in_gfs[IDX4S(KUU12GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUU22 = ReadSIMD(&in_gfs[IDX4S(KUU22GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUD00 = ReadSIMD(&in_gfs[IDX4S(KUD00GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUD01 = ReadSIMD(&in_gfs[IDX4S(KUD01GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUD02 = ReadSIMD(&in_gfs[IDX4S(KUD02GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUD11 = ReadSIMD(&in_gfs[IDX4S(KUD11GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUD12 = ReadSIMD(&in_gfs[IDX4S(KUD12GF, i0,i1,i2)]); const REAL_SIMD_ARRAY KUD22 = ReadSIMD(&in_gfs[IDX4S(KUD22GF, i0,i1,i2)]); const REAL_SIMD_ARRAY RDD00 = ReadSIMD(&aux_gfs[IDX4S(RDD00GF, i0,i1,i2)]); const REAL_SIMD_ARRAY RDD01 = ReadSIMD(&aux_gfs[IDX4S(RDD01GF, i0,i1,i2)]); const REAL_SIMD_ARRAY RDD02 = ReadSIMD(&aux_gfs[IDX4S(RDD02GF, i0,i1,i2)]); const REAL_SIMD_ARRAY RDD11 = ReadSIMD(&aux_gfs[IDX4S(RDD11GF, i0,i1,i2)]); const REAL_SIMD_ARRAY RDD12 = ReadSIMD(&aux_gfs[IDX4S(RDD12GF, i0,i1,i2)]); const REAL_SIMD_ARRAY RDD22 = ReadSIMD(&aux_gfs[IDX4S(RDD22GF, i0,i1,i2)]); const REAL_SIMD_ARRAY trR = ReadSIMD(&aux_gfs[IDX4S(TRRGF, i0,i1,i2)]); /* * NRPy+ Finite Difference Code Generation, Step 2 of 1: Evaluate SymPy expressions and write to main memory: */ const REAL_SIMD_ARRAY __RHS_exp_0 = RDD00*(gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + 2*RDD01*(-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)/(gammaDD00*gammaDD11*gammaDD22 - 
gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + 2*RDD02*(gammaDD01*gammaDD12 - gammaDD02*gammaDD11)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + RDD11*(gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + 2*RDD12*(-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + RDD22*(gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + ((trK)*(trK)) + trR + (-aDD00/((cf)*(cf)) - 1.0/3.0*trK*(hDD00 + 1)/((cf)*(cf)))*(KUU00 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(KUD02 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - 
((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(KUD01 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(KUD00 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(gammaDD11*gammaDD22 - 
((gammaDD12)*(gammaDD12)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD01*f0_of_xx0/((cf)*(cf)) - 1.0/3.0*f0_of_xx0*hDD01*trK/((cf)*(cf)))*(KUU01 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(KUD02 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - 
((gammaDD12)*(gammaDD12)))*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(KUD01 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(KUD00 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - 
((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD01*f0_of_xx0/((cf)*(cf)) - 1.0/3.0*f0_of_xx0*hDD01*trK/((cf)*(cf)))*(KUU01 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(KUD12 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + 
(1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(KUD11 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(KUD01 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - 
((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) - 1.0/3.0*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))*(KUU11 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(KUD12 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + 
(1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(KUD11 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(KUD01 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - 
((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) - 1.0/3.0*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))*(KUU02 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(KUD02 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - 
gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(KUD01 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(KUD00 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - 
gammaDD02*gammaDD11)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) - 1.0/3.0*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))*(KUU02 + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(KUD22 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 
2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(KUD12 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD11*gammaDD22 - ((gammaDD12)*(gammaDD12)))*(KUD02 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(gammaDD01*gammaDD12 - gammaDD02*gammaDD11)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + 
(1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) - 1.0/3.0*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))*(KUU12 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(KUD12 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - 
((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(KUD11 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(KUD01 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - 
gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) - 1.0/3.0*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))*(KUU12 + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(KUD22 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - 
gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD22 - ((gammaDD02)*(gammaDD02)))*(KUD12 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD01*gammaDD22 + gammaDD02*gammaDD12)*(KUD02 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(gammaDD01*gammaDD12 - gammaDD02*gammaDD11)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - 
((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)) + (-aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) - 1.0/3.0*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))*(KUU22 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(KUD22 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD22*((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - 
((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(KUD12 + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD12*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*((f0_of_xx0)*(f0_of_xx0))*f1_of_xx1*hDD12*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD11*((f0_of_xx0)*(f0_of_xx0))/((cf)*(cf)) + (1.0/3.0)*trK*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD01*gammaDD12 - gammaDD02*gammaDD11)*(KUD02 + (aDD00/((cf)*(cf)) + (1.0/3.0)*trK*(hDD00 + 1)/((cf)*(cf)))*(gammaDD01*gammaDD12 - gammaDD02*gammaDD11)/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (gammaDD00*gammaDD11 - ((gammaDD01)*(gammaDD01)))*(aDD02*f0_of_xx0*f1_of_xx1/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*f1_of_xx1*hDD02*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - 
gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11) + (-gammaDD00*gammaDD12 + gammaDD01*gammaDD02)*(aDD01*f0_of_xx0/((cf)*(cf)) + (1.0/3.0)*f0_of_xx0*hDD01*trK/((cf)*(cf)))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11))/(gammaDD00*gammaDD11*gammaDD22 - gammaDD00*((gammaDD12)*(gammaDD12)) - ((gammaDD01)*(gammaDD01))*gammaDD22 + 2*gammaDD01*gammaDD02*gammaDD12 - ((gammaDD02)*(gammaDD02))*gammaDD11)); WriteSIMD(&aux_gfs[IDX4S(HGF, i0, i1, i2)], __RHS_exp_0); } } // END LOOP: for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0+=SIMD_width) } // END LOOP: for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++) } // END LOOP: for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++) }
silly.c
/*
 * Parallel rank sort (enumeration sort) with OpenMP.
 *
 * Timing notes from the original authors (translated from Portuguese):
 *   sequential          ~4.4-4.6 s
 *   parallel, static    ~2.64 s  (2 threads)
 *   parallel, guided    ~2.64 s  (2 threads, chosen policy)
 * They also reported that schedule(dynamic) hung on one machine and printed
 * "test failed" on others.
 *
 * Root cause of those failures: the original ranking loops used
 * "#pragma omp parallel for collapse(2)". Collapsing distributes (i,j)
 * pairs across threads, so when a chunk boundary splits one i's inner-j
 * range, two threads execute "pos[i]++" on the same element concurrently --
 * a data race (lost updates). static chunks mostly hid it; dynamic chunks
 * exposed it. Fixed here by parallelizing only the outer loop: each thread
 * then owns a distinct set of i values and pos[i] is written by exactly
 * one thread, so no atomic is needed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main() {
  omp_set_num_threads(2);

  int n = 30000;

  /* Input, rank (final position) and output arrays, zero-initialized. */
  int *in  = (int*) calloc(n, sizeof(int));
  int *pos = (int*) calloc(n, sizeof(int));
  int *out = (int*) calloc(n, sizeof(int));
  if (in == NULL || pos == NULL || out == NULL) {
    fprintf(stderr, "allocation failed\n");
    return EXIT_FAILURE;
  }

  /* Fill input in strictly decreasing order: n, n-1, ..., 1. */
  #pragma omp parallel for
  for (int i = 0; i < n; i++)
    in[i] = n - i;

  /* Rank each element: pos[i] = number of elements smaller than in[i].
   * Outer loop only -- see the data-race note in the header comment. */
  #pragma omp parallel for schedule(guided)
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      if (in[i] > in[j])
        pos[i]++;

  /* Scatter each element to its rank; ranks are distinct because all
   * input values are distinct. */
  #pragma omp parallel for
  for (int i = 0; i < n; i++)
    out[pos[i]] = in[i];

  /* Verify: out must be exactly 1, 2, ..., n. A reduction flag replaces
   * the original exit(0) inside the parallel region (exiting from inside
   * a parallel region is unsafe, and a failed test should not return 0). */
  int failed = 0;
  #pragma omp parallel for reduction(|:failed)
  for (int i = 0; i < n; i++)
    if (i + 1 != out[i])
      failed = 1;

  free(in);
  free(pos);
  free(out);

  if (failed) {
    printf("test failed\n");
    return EXIT_FAILURE;
  }
  printf("test passed\n");
  return 0;
}
libm-nvptx.c
//===--------- libm/libm-nvptx.c ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Thin device-side wrappers that map the standard <math.h> entry points onto
// the corresponding NVIDIA libdevice (__nv_*) intrinsics, so OpenMP target
// regions offloaded to NVPTX can call the usual libm names. The __nv_*
// declarations come from "libm-nvptx.h".

#include <math.h>
#include <stddef.h>
#include <limits.h>

#include "libm-nvptx.h"

#pragma omp declare target

// For the few functions that libdevice provides in two flavors, select
// between the fast reduced-precision variant and the slower accurate one.
// Hard-wired (via "#if 0") to the slow/accurate variants.
#if 0
#define __FAST_OR_SLOW(fast, slow) fast
#else
#define __FAST_OR_SLOW(fast, slow) slow
#endif

// BEGIN FLOAT
float acosf(float __a) { return __nv_acosf(__a); }
float acoshf(float __a) { return __nv_acoshf(__a); }
float asinf(float __a) { return __nv_asinf(__a); }
float asinhf(float __a) { return __nv_asinhf(__a); }
float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
float atanf(float __a) { return __nv_atanf(__a); }
float atanhf(float __a) { return __nv_atanhf(__a); }
float cbrtf(float __a) { return __nv_cbrtf(__a); }
float ceilf(float __a) { return __nv_ceilf(__a); }
float copysignf(float __a, float __b) { return __nv_copysignf(__a, __b); }
float cosf(float __a) { return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a); }
float coshf(float __a) { return __nv_coshf(__a); }
float cospif(float __a) { return __nv_cospif(__a); }
float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
float erfcf(float __a) { return __nv_erfcf(__a); }
float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
float erfcxf(float __a) { return __nv_erfcxf(__a); }
float erff(float __a) { return __nv_erff(__a); }
float erfinvf(float __a) { return __nv_erfinvf(__a); }
float exp10f(float __a) { return __nv_exp10f(__a); }
float exp2f(float __a) { return __nv_exp2f(__a); }
float expf(float __a) { return __nv_expf(__a); }
float expm1f(float __a) { return __nv_expm1f(__a); }
float fabsf(float __a) { return __nv_fabsf(__a); }
float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
// Plain division unless fast-math without precise division is requested.
float fdividef(float __a, float __b) {
#if __FAST_MATH__ && !__CUDA_PREC_DIV
  return __nv_fast_fdividef(__a, __b);
#else
  return __a / __b;
#endif
}
float floorf(float __f) { return __nv_floorf(__f); }
float fmaf(float __a, float __b, float __c) { return __nv_fmaf(__a, __b, __c); }
float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
int ilogbf(float __a) { return __nv_ilogbf(__a); }
// Classification helpers used by the isfinite/isinf/isnan macros.
int __finitef(float __a) { return __nv_finitef(__a); }
int __isinff(float __a) { return __nv_isinff(__a); }
int __isnanf(float __a) { return __nv_isnanf(__a); }
float j0f(float __a) { return __nv_j0f(__a); }
float j1f(float __a) { return __nv_j1f(__a); }
float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
float lgammaf(float __a) { return __nv_lgammaf(__a); }
long long llrintf(float __a) { return __nv_llrintf(__a); }
long long llroundf(float __a) { return __nv_llroundf(__a); }
float log10f(float __a) { return __nv_log10f(__a); }
float log1pf(float __a) { return __nv_log1pf(__a); }
float log2f(float __a) { return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a); }
float logbf(float __a) { return __nv_logbf(__a); }
float logf(float __a) { return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a); }
long long __float2ll_rn(float __a) { return __nv_float2ll_rn(__a); }
// long is 64-bit on LP64 targets, 32-bit otherwise; pick matching helpers.
#if defined(__LP64__)
long lrintf(float __a) { return __float2ll_rn(__a); }
long lroundf(float __a) { return llroundf(__a); }
#else
long lrintf(float __a) { return __float2int_rn(__a); }
long lroundf(float __a) { return roundf(__a); }
#endif
float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
// nanf - missing
float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
float nextafterf(float __a, float __b) { return __nv_nextafterf(__a, __b); }
float norm3df(float __a, float __b, float __c) { return __nv_norm3df(__a, __b, __c); }
float norm4df(float __a, float __b, float __c, float __d) { return __nv_norm4df(__a, __b, __c, __d); }
float normcdff(float __a) { return __nv_normcdff(__a); }
float normf(int __dim, const float *__t) { return __nv_normf(__dim, __t); }
float powf(float __a, float __b) { return __nv_powf(__a, __b); }
float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
float remainderf(float __a, float __b) { return __nv_remainderf(__a, __b); }
float remquof(float __a, float __b, int *__c) { return __nv_remquof(__a, __b, __c); }
float rhypotf(float __a, float __b) { return __nv_rhypotf(__a, __b); }
float rintf(float __a) { return __nv_rintf(__a); }
float rnorm3df(float __a, float __b, float __c) { return __nv_rnorm3df(__a, __b, __c); }
float rnorm4df(float __a, float __b, float __c, float __d) { return __nv_rnorm4df(__a, __b, __c, __d); }
float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
float rnormf(int __dim, const float *__t) { return __nv_rnormf(__dim, __t); }
float roundf(float __a) { return __nv_roundf(__a); }
float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
// Clamp the long exponent into int range before delegating to scalbnf;
// out-of-range exponents saturate to +/-inf (overflow) or +/-0 (underflow).
float scalblnf(float __a, long __b) {
  if (__b > INT_MAX)
    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
  if (__b < INT_MIN)
    return __a > 0 ? 0.f : -0.f;
  return scalbnf(__a, (int)__b);
}
float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
int __signbitf(float __a) { return __nv_signbitf(__a); }
void sincosf(float __a, float *__sptr, float *__cptr) { return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __sptr, __cptr); }
void sincospif(float __a, float *__sptr, float *__cptr) { return __nv_sincospif(__a, __sptr, __cptr); }
float sinf(float __a) { return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a); }
float sinhf(float __a) { return __nv_sinhf(__a); }
float sinpif(float __a) { return __nv_sinpif(__a); }
float sqrtf(float __a) { return __nv_sqrtf(__a); }
float tanf(float __a) { return __nv_tanf(__a); }
float tanhf(float __a) { return __nv_tanhf(__a); }
float tgammaf(float __a) { return __nv_tgammaf(__a); }
float truncf(float __a) { return __nv_truncf(__a); }
float y0f(float __a) { return __nv_y0f(__a); }
float y1f(float __a) { return __nv_y1f(__a); }
float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }

// BEGIN INTRINSICS
// CUDA-style double-underscore intrinsics: fast approximations and
// explicitly-rounded (_rd/_rn/_ru/_rz) arithmetic.
float __cosf(float __a) { return __nv_fast_cosf(__a); }
float __exp10f(float __a) { return __nv_fast_exp10f(__a); }
float __expf(float __a) { return __nv_fast_expf(__a); }
float __fadd_rd(float __a, float __b) { return __nv_fadd_rd(__a, __b); }
float __fadd_rn(float __a, float __b) { return __nv_fadd_rn(__a, __b); }
float __fadd_ru(float __a, float __b) { return __nv_fadd_ru(__a, __b); }
float __fadd_rz(float __a, float __b) { return __nv_fadd_rz(__a, __b); }
float __fdiv_rd(float __a, float __b) { return __nv_fdiv_rd(__a, __b); }
float __fdiv_rn(float __a, float __b) { return __nv_fdiv_rn(__a, __b); }
float __fdiv_ru(float __a, float __b) { return __nv_fdiv_ru(__a, __b); }
float __fdiv_rz(float __a, float __b) { return __nv_fdiv_rz(__a, __b); }
float __fdividef(float __a, float __b) { return __nv_fast_fdividef(__a, __b); }
float __fmaf_rd(float __a, float __b, float __c) { return __nv_fmaf_rd(__a, __b, __c); }
float __fmaf_rn(float __a, float __b, float __c) { return __nv_fmaf_rn(__a, __b, __c); }
float __fmaf_ru(float __a, float __b, float __c) { return __nv_fmaf_ru(__a, __b, __c); }
float __fmaf_rz(float __a, float __b, float __c) { return __nv_fmaf_rz(__a, __b, __c); }
float __fmul_rd(float __a, float __b) { return __nv_fmul_rd(__a, __b); }
float __fmul_rn(float __a, float __b) { return __nv_fmul_rn(__a, __b); }
float __fmul_ru(float __a, float __b) { return __nv_fmul_ru(__a, __b); }
float __fmul_rz(float __a, float __b) { return __nv_fmul_rz(__a, __b); }
float __frcp_rd(float __a) { return __nv_frcp_rd(__a); }
float __frcp_rn(float __a) { return __nv_frcp_rn(__a); }
float __frcp_ru(float __a) { return __nv_frcp_ru(__a); }
float __frcp_rz(float __a) { return __nv_frcp_rz(__a); }
float __fsqrt_rd(float __a) { return __nv_fsqrt_rd(__a); }
float __fsqrt_rn(float __a) { return __nv_fsqrt_rn(__a); }
float __fsqrt_ru(float __a) { return __nv_fsqrt_ru(__a); }
float __fsqrt_rz(float __a) { return __nv_fsqrt_rz(__a); }
float __fsub_rd(float __a, float __b) { return __nv_fsub_rd(__a, __b); }
float __fsub_rn(float __a, float __b) { return __nv_fsub_rn(__a, __b); }
float __fsub_ru(float __a, float __b) { return __nv_fsub_ru(__a, __b); }
float __fsub_rz(float __a, float __b) { return __nv_fsub_rz(__a, __b); }
float __log10f(float __a) { return __nv_fast_log10f(__a); }
float __log2f(float __a) { return __nv_fast_log2f(__a); }
float __logf(float __a) { return __nv_fast_logf(__a); }
float __powf(float __a, float __b) { return __nv_fast_powf(__a, __b); }
float __saturatef(float __a) { return __nv_saturatef(__a); }
void __sincosf(float __a, float *__sptr, float *__cptr) { return __nv_fast_sincosf(__a, __sptr, __cptr); }
float __sinf(float __a) { return __nv_fast_sinf(__a); }
float __tanf(float __a) { return __nv_fast_tanf(__a); }

// BEGIN DOUBLE
double acos(double __a) { return __nv_acos(__a); }
double acosh(double __a) { return __nv_acosh(__a); }
double asin(double __a) { return __nv_asin(__a); }
double asinh(double __a) { return __nv_asinh(__a); }
double atan(double __a) { return __nv_atan(__a); }
double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
double atanh(double __a) { return __nv_atanh(__a); }
double cbrt(double __a) { return __nv_cbrt(__a); }
double ceil(double __a) { return __nv_ceil(__a); }
double copysign(double __a, double __b) { return __nv_copysign(__a, __b); }
double cos(double __a) { return __nv_cos(__a); }
double cosh(double __a) { return __nv_cosh(__a); }
double cospi(double __a) { return __nv_cospi(__a); }
double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
double erf(double __a) { return __nv_erf(__a); }
double erfc(double __a) { return __nv_erfc(__a); }
double erfcinv(double __a) { return __nv_erfcinv(__a); }
double erfcx(double __a) { return __nv_erfcx(__a); }
double erfinv(double __a) { return __nv_erfinv(__a); }
double exp(double __a) { return __nv_exp(__a); }
double exp10(double __a) { return __nv_exp10(__a); }
double exp2(double __a) { return __nv_exp2(__a); }
double expm1(double __a) { return __nv_expm1(__a); }
double fabs(double __a) { return __nv_fabs(__a); }
double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
double floor(double __f) { return __nv_floor(__f); }
double fma(double __a, double __b, double __c) { return __nv_fma(__a, __b, __c); }
double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
int ilogb(double __a) { return __nv_ilogb(__a); }
// Classification helpers used by the isfinite/isinf/isnan macros.
int __finite(double __a) { return __nv_isfinited(__a); }
int __isinf(double __a) { return __nv_isinfd(__a); }
int __isnan(double __a) { return __nv_isnand(__a); }
double j0(double __a) { return __nv_j0(__a); }
double j1(double __a) { return __nv_j1(__a); }
double jn(int __n, double __a) { return __nv_jn(__n, __a); }
double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
double lgamma(double __a) { return __nv_lgamma(__a); }
long long llrint(double __a) { return __nv_llrint(__a); }
long long llround(double __a) { return __nv_llround(__a); }
double log(double __a) { return __nv_log(__a); }
double log10(double __a) { return __nv_log10(__a); }
double log1p(double __a) { return __nv_log1p(__a); }
double log2(double __a) { return __nv_log2(__a); }
double logb(double __a) { return __nv_logb(__a); }
// long is 64-bit on LP64 targets, 32-bit otherwise; pick matching helpers.
#if defined(__LP64__)
long lrint(double __a) { return llrint(__a); }
long lround(double __a) { return llround(__a); }
#else
long lrint(double __a) { return (long)rint(__a); }
long lround(double __a) { return round(__a); }
#endif
double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
// nan - missing
double nearbyint(double __a) { return __nv_nearbyint(__a); }
double nextafter(double __a, double __b) { return __nv_nextafter(__a, __b); }
double norm(int __dim, const double *__t) { return __nv_norm(__dim, __t); }
double norm3d(double __a, double __b, double __c) { return __nv_norm3d(__a, __b, __c); }
double norm4d(double __a, double __b, double __c, double __d) { return __nv_norm4d(__a, __b, __c, __d); }
double normcdf(double __a) { return __nv_normcdf(__a); }
double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
double pow(double __a, double __b) { return __nv_pow(__a, __b); }
double rcbrt(double __a) { return __nv_rcbrt(__a); }
double remainder(double __a, double __b) { return __nv_remainder(__a, __b); }
double remquo(double __a, double __b, int *__c) { return __nv_remquo(__a, __b, __c); }
double rhypot(double __a, double __b) { return __nv_rhypot(__a, __b); }
double rint(double __a) { return __nv_rint(__a); }
double rnorm(int __a, const double *__b) { return __nv_rnorm(__a, __b); }
double rnorm3d(double __a, double __b, double __c) { return __nv_rnorm3d(__a, __b, __c); }
double rnorm4d(double __a, double __b, double __c, double __d) { return __nv_rnorm4d(__a, __b, __c, __d); }
double round(double __a) { return __nv_round(__a); }
double rsqrt(double __a) { return __nv_rsqrt(__a); }
double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
// Clamp the long exponent into int range before delegating to scalbn;
// out-of-range exponents saturate to +/-inf (overflow) or +/-0 (underflow).
double scalbln(double __a, long __b) {
  if (__b > INT_MAX)
    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
  if (__b < INT_MIN)
    return __a > 0 ? 0.0 : -0.0;
  return scalbn(__a, (int)__b);
}
int __signbit(double __a) { return __nv_signbitd(__a); }
double sin(double __a) { return __nv_sin(__a); }
void sincos(double __a, double *__sptr, double *__cptr) { return __nv_sincos(__a, __sptr, __cptr); }
void sincospi(double __a, double *__sptr, double *__cptr) { return __nv_sincospi(__a, __sptr, __cptr); }
double sinh(double __a) { return __nv_sinh(__a); }
double sinpi(double __a) { return __nv_sinpi(__a); }
double sqrt(double __a) { return __nv_sqrt(__a); }
double tan(double __a) { return __nv_tan(__a); }
double tanh(double __a) { return __nv_tanh(__a); }
double tgamma(double __a) { return __nv_tgamma(__a); }
double trunc(double __a) { return __nv_trunc(__a); }
double y0(double __a) { return __nv_y0(__a); }
double y1(double __a) { return __nv_y1(__a); }
double yn(int __a, double __b) { return __nv_yn(__a, __b); }

// BEGIN INTRINSICS
// Explicitly-rounded (_rd/_rn/_ru/_rz) double-precision arithmetic.
double __dadd_rd(double __a, double __b) { return __nv_dadd_rd(__a, __b); }
double __dadd_rn(double __a, double __b) { return __nv_dadd_rn(__a, __b); }
double __dadd_ru(double __a, double __b) { return __nv_dadd_ru(__a, __b); }
double __dadd_rz(double __a, double __b) { return __nv_dadd_rz(__a, __b); }
double __ddiv_rd(double __a, double __b) { return __nv_ddiv_rd(__a, __b); }
double __ddiv_rn(double __a, double __b) { return __nv_ddiv_rn(__a, __b); }
double __ddiv_ru(double __a, double __b) { return __nv_ddiv_ru(__a, __b); }
double __ddiv_rz(double __a, double __b) { return __nv_ddiv_rz(__a, __b); }
double __dmul_rd(double __a, double __b) { return __nv_dmul_rd(__a, __b); }
double __dmul_rn(double __a, double __b) { return __nv_dmul_rn(__a, __b); }
double __dmul_ru(double __a, double __b) { return __nv_dmul_ru(__a, __b); }
double __dmul_rz(double __a, double __b) { return __nv_dmul_rz(__a, __b); }
double __drcp_rd(double __a) { return __nv_drcp_rd(__a); }
double __drcp_rn(double __a) { return __nv_drcp_rn(__a); }
double __drcp_ru(double __a) { return __nv_drcp_ru(__a); }
double __drcp_rz(double __a) { return __nv_drcp_rz(__a); }
double __dsqrt_rd(double __a) { return __nv_dsqrt_rd(__a); }
double __dsqrt_rn(double __a) { return __nv_dsqrt_rn(__a); }
double __dsqrt_ru(double __a) { return __nv_dsqrt_ru(__a); }
double __dsqrt_rz(double __a) { return __nv_dsqrt_rz(__a); }
double __dsub_rd(double __a, double __b) { return __nv_dsub_rd(__a, __b); }
double __dsub_rn(double __a, double __b) { return __nv_dsub_rn(__a, __b); }
double __dsub_ru(double __a, double __b) { return __nv_dsub_ru(__a, __b); }
double __dsub_rz(double __a, double __b) { return __nv_dsub_rz(__a, __b); }
double __fma_rd(double __a, double __b, double __c) { return __nv_fma_rd(__a, __b, __c); }
double __fma_rn(double __a, double __b, double __c) { return __nv_fma_rn(__a, __b, __c); }
double __fma_ru(double __a, double __b, double __c) { return __nv_fma_ru(__a, __b, __c); }
double __fma_rz(double __a, double __b, double __c) { return __nv_fma_rz(__a, __b, __c); }
// END DOUBLE
#pragma omp end declare target
dacemath.c
/****************************************************************************** * * * DIFFERENTIAL ALGEBRA CORE ENGINE * * * ******************************************************************************* * * * Copyright 2016 Politecnico di Milano (2014 Dinamica Srl) * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * *******************************************************************************/ /* * dacemath.c * * Created on: November 18, 2016 * Author: Politecnico di Milano */ /** \addtogroup DACE Core * @{ */ // MS C library needs this to trigger it to define math constants #define _USE_MATH_DEFINES #include <math.h> #include <stdlib.h> #include "dace/config.h" #include "dace/dacebase.h" #include "dace/daceaux.h" #include "dacecontrib.h" // define various math constants in case they have not been defined by math.h // these are non-standard C, but most C libraries have them #ifndef M_PI #define M_PI (3.14159265358979323846) #endif #ifndef M_PI_2 #define M_PI_2 (1.57079632679489661923) #endif /******************************************************************************** * Basic DACE arithmetic operations *********************************************************************************/ /*! Perform addition of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. 
inc can be the same as ina or inb.
 */
void daceAdd(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    if(!daceIsSameObject(ina, inc) && !daceIsSameObject(inb, inc))
    {
        // output aliases neither input: sum directly into inc
        daceWeightedSum(ina, 1.0, inb, 1.0, inc);
    }
    else
    {
        // output aliases an input: accumulate into a temporary, then copy back
        DACEDA idaadd;
        daceAllocateDA(&idaadd, 0);
        daceWeightedSum(ina, 1.0, inb, 1.0, &idaadd);
        daceCopy(&idaadd, inc);
        daceFreeDA(&idaadd);
    }
}

/*! Perform subtraction of two DA objects.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] inb Pointer to the second DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina or inb.
*/
void daceSubtract(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    if(!daceIsSameObject(ina, inc) && !daceIsSameObject(inb, inc))
    {
        // output aliases neither input: subtract directly into inc
        daceWeightedSum(ina, 1.0, inb, -1.0, inc);
    }
    else
    {
        // output aliases an input: accumulate into a temporary, then copy back
        DACEDA idasub;
        daceAllocateDA(&idasub, 0);
        daceWeightedSum(ina, 1.0, inb, -1.0, &idasub);
        daceCopy(&idasub, inc);
        daceFreeDA(&idasub);
    }
}

/*! Perform multiplication of two DA objects.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] inb Pointer to the second DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina or inb.
*/
void daceMultiply(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    // These should use thread local storage (TLS) for multithread safe implementations
    // see https://en.wikipedia.org/wiki/Thread-local_storage
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    // fixed-size scratch buffers sized for the compile-time maximum order/variables
    static DACE_THREAD_LOCAL double cc[DACE_STATIC_NMMAX] = {0};
    static DACE_THREAD_LOCAL extended_monomial emb[DACE_STATIC_NMMAX];
    static DACE_THREAD_LOCAL extended_monomial *ipbeg[DACE_STATIC_NOMAX+1];
    static DACE_THREAD_LOCAL extended_monomial *ipend[DACE_STATIC_NOMAX+1];
    static DACE_THREAD_LOCAL unsigned int nomax = 0;
    static DACE_THREAD_LOCAL unsigned int nvmax = 0;

    // make sure static memory is correctly allocated
    // (re-partition the order buckets whenever the active DA setup changed)
    if(UNLIKELY(nomax != DACECom.nomax || nvmax != DACECom.nvmax))
    {
        nomax = DACECom.nomax;
        nvmax = DACECom.nvmax;
        ipbeg[0] = &emb[0];
        for(unsigned int i = 1; i <= DACECom.nomax; i++)
            ipbeg[i] = emb + daceCountMonomials(i - 1, DACECom.nvmax);
    }
#else
    // dynamically (re)allocated scratch buffers, kept between calls per thread
    static DACE_THREAD_LOCAL double *cc = NULL;
    static DACE_THREAD_LOCAL extended_monomial *emb = NULL;
    static DACE_THREAD_LOCAL extended_monomial **ipbeg = NULL;
    static DACE_THREAD_LOCAL extended_monomial **ipend = NULL;
    static DACE_THREAD_LOCAL unsigned int nomax = 0;
    static DACE_THREAD_LOCAL unsigned int nvmax = 0;

    // make sure static memory is correctly allocated
    if(UNLIKELY(nomax != DACECom.nomax || nvmax != DACECom.nvmax))
    {
        nomax = DACECom.nomax;
        nvmax = DACECom.nvmax;
        dacefree(cc);
        dacefree(emb);
        dacefree(ipbeg);
        dacefree(ipend);
        cc = (double*) dacecalloc(DACECom.nmmax, sizeof(double));
        emb = (extended_monomial*) dacecalloc(DACECom.nmmax, sizeof(extended_monomial));
        ipbeg = (extended_monomial**) dacecalloc(DACECom.nomax+1, sizeof(extended_monomial*));
        ipend = (extended_monomial**) dacecalloc(DACECom.nomax+1, sizeof(extended_monomial*));
        ipbeg[0] = &emb[0];
        for(unsigned int i = 1; i <= DACECom.nomax; i++)
            ipbeg[i] = emb + daceCountMonomials(i - 1, DACECom.nvmax);
    }
#endif
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);

    // sort so that ina is the short DA vector
    if(illa>illb)
    {
        unsigned int t1;
        t1 = illb; illb = illa; illa = t1;
        t1 = ilmb; ilmb = ilma; ilma = t1;
        monomial* t2;
        t2 = ipoa; ipoa = ipob; ipob = t2;
    }

    // reset each order bucket to empty
    for(unsigned int i = 0; i <= DACECom_t.nocut; i++)
        ipend[i] = ipbeg[i];

    // sort vector b by order (terms above the truncation order are dropped)
    for(monomial *ib = ipob; ib < ipob+illb; ib++)
    {
        const unsigned int noib = DACECom.ieo[ib->ii];
        if(noib > DACECom_t.nocut) continue;
        ipend[noib]->i1 = DACECom.ie1[ib->ii];
        ipend[noib]->i2 = DACECom.ie2[ib->ii];
        ipend[noib]->cc = ib->cc;
        ipend[noib]++;
    }

    // perform actual multiplication: for every monomial of a, visit only
    // the order buckets of b whose combined order stays within nocut
    for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
    {
        const unsigned int i1ia = DACECom.ie1[ia->ii];
        const unsigned int i2ia = DACECom.ie2[ia->ii];
        const double ccia = ia->cc;
        // Note: all of these inner loops can safely be run in parallel
        //#pragma omp parallel for
        for(int noib = DACECom_t.nocut-DACECom.ieo[ia->ii]; noib >= 0; noib--)
        {
            for(extended_monomial *ib = ipbeg[noib]; ib < ipend[noib]; ib++)
            {
                const unsigned int ic = DACECom.ia1[i1ia+ib->i1] + DACECom.ia2[i2ia+ib->i2];
                cc[ic] += ccia*ib->cc;
            }
        }
    }

    // gather the dense coefficient array back into the sparse result
    // (dacePack presumably also clears cc for the next call — TODO confirm)
    dacePack(cc, inc);
}

/*! Multiply two DA vectors component-wise, i.e. each monomial of ina with the
   corresponding monomial of inb
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] inb Pointer to the second DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina or inb.
\sa daceEvalMonomials */ void daceMultiplyMonomials(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { monomial *ipoa; unsigned int ilma, illa; monomial *ipob; unsigned int ilmb, illb; monomial *ipoc; unsigned int ilmc, illc; daceVariableInformation(ina, &ipoa, &ilma, &illa); daceVariableInformation(inb, &ipob, &ilmb, &illb); daceVariableInformation(inc, &ipoc, &ilmc, &illc); monomial *ib = ipob, *ic = ipoc; monomial *const ibmax = ipob + ilmb, *const icmax = ipoc + ilmc; for (monomial *i = ipoa; i < ipoa + illa; i++) { while (ib->ii < i->ii && ib < ibmax) ib++; if (ib == ibmax) break; if (ib->ii == i->ii) { if (ic >= icmax) { daceSetError(__func__, DACE_ERROR, 21); break; } ic->cc = i->cc*ib->cc; ic->ii = i->ii; ic++; } } } /*! Perform division of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. */ void daceDivide(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { DACEDA idadiv; daceAllocateDA(&idadiv, 0); daceMultiplicativeInverse(inb, &idadiv); daceMultiply(ina, &idadiv, inc); daceFreeDA(&idadiv); } /*! Square a DA object. \param[in] ina Pointer to the DA object to square \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceSquare(const DACEDA *ina, DACEDA *inb) { daceMultiply(ina, ina, inb); } /*! Add constant to a DA object. \param[in] ina Pointer to the first DA object to operate on \param[in] ckon Constant value to add \param[out] inb Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inb can be the same as ina. */ void daceAddDouble(const DACEDA *ina, const double ckon, DACEDA *inb) { if(!daceIsSameObject(ina, inb)) daceCopy(ina, inb); daceSetCoefficient0(inb, 0, daceGetConstant(inb)+ckon); } /*! 
Subtract DA object from constant.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to subtract from
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
*/
void daceDoubleSubtract(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    // ckon - ina == (-1)*ina, then adjust the constant part by ckon
    daceMultiplyDouble(ina, -1.0, inb);
    daceSetCoefficient0(inb, 0, daceGetConstant(inb)+ckon);
}

/*! Subtract constant from a DA object.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to subtract
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
*/
void daceSubtractDouble(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    daceAddDouble(ina, -ckon, inb);
}

/*! Multiply constant and DA object.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to multiply by
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
*/
void daceMultiplyDouble(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);

    monomial *ib = ipob;

    if(illa <= ilmb)
    {
        // result is guaranteed to fit into inb: no overflow check needed
        for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
        {
            // skip terms above the truncation order and terms that underflow eps
            if(DACECom.ieo[ia->ii] > DACECom_t.nocut) continue;
            const double c = ia->cc*ckon;
            if(fabs(c) < DACECom_t.eps) continue;
            ib->cc = c;
            ib->ii = ia->ii;
            ib++;
        }
    }
    else
    {
        // inb may be too short: bound-check every write
        monomial *const ibmax = ipob+ilmb;
        for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
        {
            if(DACECom.ieo[ia->ii] > DACECom_t.nocut) continue;
            const double c = ia->cc*ckon;
            if(fabs(c) < DACECom_t.eps) continue;
            if(ib >= ibmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ib->cc = c;
            ib->ii = ia->ii;
            ib++;
        }
    }

    daceSetLength(inb, ib-ipob);
}

/*!
Divide DA object by a constant.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to divide by
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
*/
void daceDivideDouble(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    // division by zero is a DACE error, not an IEEE infinity
    if(ckon == 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 41);
        daceCreateConstant(inb, 0.0);
        return;
    }
    daceMultiplyDouble(ina, 1.0/ckon, inb);
}

/*! Divide constant by DA object.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to divide
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceDoubleDivide(const DACEDA *ina, const double ckon, DACEDA *inc)
{
    // ckon/a = ckon * (1/a)
    daceMultiplicativeInverse(ina, inc);
    daceMultiplyDouble(inc, ckon, inc);
}

/*! Divide a DA vector by a single variable to some power, if possible.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] var Number of the independent variable by which to divide
   \param[in] p Power of independent variable
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceDivideByVariable(const DACEDA *ina, const unsigned int var, const unsigned int p, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // independent variables are numbered 1..nvmax
    if(var < 1 || var > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // treat a few special cases
    if(p == 0)
    {
        // dividing by 1
        daceCopy(ina, inc);
        return;
    }
    else if(illa == 0)
    {
        // dividing 0 by anything
        daceCreateConstant(inc, 0.0);
        return;
    }
    else if(p > DACECom.nomax)
    {
        // dividing non-zero DA by too high a power
        daceSetError(__func__, DACE_ERROR, 42);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base (nomax+1); idiv selects the digit of variable var
    // within either the first (ie1) or second (ie2) exponent word
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = var-1;
    if(var > DACECom.nv1) j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);
    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;

    if(var > DACECom.nv1)
    {
        // variable lives in the second exponent word (ie2)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;
            if(ipow < p)
            {
                // some monomial has too low a power in var: division impossible
                daceSetError(__func__, DACE_ERROR, 42);
                daceCreateConstant(inc, 0.0);
                return;
            }
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2-p*idiv];
            ic->cc = i->cc;
            ic++;
        }
    }
    else
    {
        // variable lives in the first exponent word (ie1)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;
            if(ipow < p)
            {
                daceSetError(__func__, DACE_ERROR, 42);
                daceCreateConstant(inc, 0.0);
                return;
            }
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1-p*idiv] + DACECom.ia2[ic2];
            ic->cc = i->cc;
            ic++;
        }
    }

    daceSetLength(inc, ic-ipoc);
}

/*! Derivative of DA object with respect to a given independent variable.
\param[in] idif Number of the independent variable with respect to which the
    derivative is taken
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceDifferentiate(const unsigned int idif, const DACEDA *ina, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // independent variables are numbered 1..nvmax
    if(idif < 1 || idif > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base (nomax+1); idiv selects the digit of variable
    // idif within either the first (ie1) or second (ie2) exponent word
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = idif-1;
    if(idif > DACECom.nv1) j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);
    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;

    if(idif > DACECom.nv1)
    {
        // variable lives in the second exponent word (ie2)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;
            // skip terms constant in idif, and terms whose derivative would
            // exceed the truncation order
            if(ipow == 0 || DACECom.ieo[i->ii] > DACECom_t.nocut+1) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2-idiv];
            ic->cc = i->cc*ipow;
            ic++;
        }
    }
    else
    {
        // variable lives in the first exponent word (ie1)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;
            if(ipow == 0 || DACECom.ieo[i->ii] > DACECom_t.nocut+1) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1-idiv] + DACECom.ia2[ic2];
            ic->cc = i->cc*ipow;
            ic++;
        }
    }

    daceSetLength(inc, ic-ipoc);
}

/*! Integral of DA object with respect to a given independent variable.
\param[in] iint Number of the independent variable with respect to which the
    integral is taken
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceIntegrate(const unsigned int iint, const DACEDA *ina, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // independent variables are numbered 1..nvmax
    if(iint < 1 || iint > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base (nomax+1); idiv selects the digit of variable
    // iint within either the first (ie1) or second (ie2) exponent word
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = iint-1;
    if(iint > DACECom.nv1) j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);
    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;

    if(iint > DACECom.nv1)
    {
        // variable lives in the second exponent word (ie2)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            // integration raises the order by one: drop terms at/above nocut
            if(DACECom.ieo[i->ii] >= DACECom_t.nocut) continue;
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;
            const double ccc = i->cc/(ipow+1);
            if(fabs(ccc) < DACECom_t.eps) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2+idiv];
            ic->cc = ccc;
            ic = ic+1;
        }
    }
    else
    {
        // variable lives in the first exponent word (ie1)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            if(DACECom.ieo[i->ii] >= DACECom_t.nocut) continue;
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;
            const double ccc = i->cc/(ipow+1);
            if(fabs(ccc) < DACECom_t.eps) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ic->ii = DACECom.ia1[ic1+idiv] + DACECom.ia2[ic2];
            ic->cc = ccc;
            ic = ic+1;
        }
    }

    daceSetLength(inc, ic-ipoc);
}

/********************************************************************************
 *     DACE intrinsic function routines
*********************************************************************************/

/*! Truncate the constant part of a DA object to an integer.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
   \note NOTE(review): despite the name, this uses rint() (round to nearest,
    per the current rounding mode) rather than trunc() (round toward zero) —
    confirm which semantics callers expect.
*/
void daceTruncate(const DACEDA *ina, DACEDA *inc)
{
    daceCopy(ina, inc);
    daceSetCoefficient0(inc, 0, rint(daceGetConstant(inc)));
}

/*! Round the constant part of a DA object to an integer.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceRound(const DACEDA *ina, DACEDA *inc)
{
    daceCopy(ina, inc);
    daceSetCoefficient0(inc, 0, round(daceGetConstant(inc)));
}

/*! Modulo the constant part of a DA object by p.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] p Value with respect to which to compute the modulo
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
   \note Only the constant part is affected; higher-order terms are copied unchanged.
*/
void daceModulo(const DACEDA *ina, const double p, DACEDA *inc)
{
    daceCopy(ina, inc);
    daceSetCoefficient0(inc, 0, fmod(daceGetConstant(inc),p));
}

/*! Raise a DA object to the p-th power.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] p Power to which to raise the DA object
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void dacePowerDouble(const DACEDA *ina, const double p, DACEDA *inc)
{
    // check simple cases
    if(p == 0.0)
    {
        daceCreateConstant(inc, 1.0);
        return;
    }
    else if(p == (int)p)
    {
        // integer exponent: defer to the exact integer-power routine
        // NOTE(review): the (int)p cast is undefined for |p| beyond INT range — TODO confirm callers never pass such values
        dacePower(ina, (int)p, inc);
        return;
    }

    // non-integer power requires a strictly positive constant part
    const double a0 = daceGetConstant(ina);
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 43);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // Taylor coefficients of x^p around a0 (binomial series), normalized to a0
    xf[0] = pow(a0, p);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        xf[i] = xf[i-1]/i*(p-(i-1));

    daceDivideDouble(ina, a0, inc);     // more accurate than including a0 in series (uses non-linear part in EvaluateSeries)
    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Raise a DA object to the p-th integer power.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] p Power to which to raise the DA object
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void dacePower(const DACEDA *ina, const int np, DACEDA *inc)
{
    DACEDA itemp;

    // handle some common simple cases directly
    switch(np)
    {
        case 0:
            daceCreateConstant(inc, 1.0);
            return;
        case 1:
            daceCopy(ina, inc);
            return;
        case -1:
            daceMultiplicativeInverse(ina, inc);
            return;
    }

    // handle all other cases, again with common special cases hard coded
    switch(abs(np))
    {
        case 2:
            daceSquare(ina, inc);
            break;
        case 3:
            daceAllocateDA(&itemp, 0);
            daceSquare(ina, &itemp);
            daceMultiply(ina, &itemp, inc);
            daceFreeDA(&itemp);
            break;
        case 4:
            daceAllocateDA(&itemp, 0);
            daceSquare(ina, &itemp);
            daceSquare(&itemp, inc);
            daceFreeDA(&itemp);
            break;
        default:
            // binary exponentiation (square-and-multiply);
            // relies on daceMultiply/daceSquare being aliasing safe
            daceAllocateDA(&itemp, 0);
            daceCopy(ina, &itemp);
            daceCreateConstant(inc, 1.0);
            unsigned int inp = abs(np);
            while(inp)
            {
                if(inp & 1u)
                    daceMultiply(inc, &itemp, inc);
                inp >>= 1;
                if(inp)
                    daceSquare(&itemp, &itemp);
            }
            daceFreeDA(&itemp);
    }

    // negative exponent: x^(-n) = 1/(x^n)
    if(np < 0)
        daceMultiplicativeInverse(inc, inc);
}

/*! Take the np-th root of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] np Root to take of the DA object
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceRoot(const DACEDA *ina, const int np, DACEDA *inc)
{
    // the 0-th root is undefined
    if(np == 0)
    {
        daceSetError(__func__, DACE_ERROR, 44);
        daceCreateConstant(inc, 0.0);
        return;
    }

    const double a0 = daceGetConstant(ina);
    const unsigned int iodd = abs(np) & 1u;
    // even roots require a positive constant part; odd roots a non-zero one
    if((iodd == 0) && (a0 <= 0.0))
    {
        daceSetError(__func__, DACE_ERROR, 45);
        daceCreateConstant(inc, 0.0);
        return;
    }
    else if((iodd == 1) && (a0 == 0.0))
    {
        daceSetError(__func__, DACE_ERROR, 46);
        daceCreateConstant(inc, 0.0);
        return;
    }

    double cr = 1.0/np;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    // binomial series coefficients of x^(1/np) around a0, normalized to a0;
    // copysign handles the negative-a0 case for odd roots
    xf[0] = copysign(pow(fabs(a0), cr), a0);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = xf[i-1]/i*cr;
        cr--;
    }

    daceDivideDouble(ina, a0, inc);     // more accurate than including a0 in series (uses non-linear part in EvaluateSeries)
    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the multiplicative inverse of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceMultiplicativeInverse(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);
    if(a0 == 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 41);
        daceCreateConstant(inc, 0.0);
        return;
    }

    if(DACECom_t.nocut < 5)
    {
        // lower orders: compute series directly
        daceMultiplicativeInverse0(ina, inc, a0);
    }
    else
    {
        // higher orders: use iteration
        // (Newton iteration x <- x*(2 - a*x) doubles the valid order each step)
        const unsigned int nocut = DACECom_t.nocut;
        DACECom_t.nocut = 2;
        daceMultiplicativeInverse0(ina, inc, a0);
        DACEDA temp;
        daceAllocateDA(&temp, 0);
        for(unsigned int ord = 3; ord <= nocut; ord *= 2)
        {
            // temporarily raise the truncation order for this refinement step
            DACECom_t.nocut = umin(nocut, 2*ord-1);
            daceMultiply(ina, inc, &temp);
            daceDoubleSubtract(&temp, 2.0, &temp);
            daceMultiply(inc, &temp, inc);
        }
        daceFreeDA(&temp);
    }
}

/*!
Compute the multiplicative inverse of a DA object using series expansion.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \param[in] a0 Constant part of ina
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceMultiplicativeInverse0(const DACEDA *ina, DACEDA *inc, const double a0)
{
    daceDivideDouble(ina, a0, inc);

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    // geometric series: 1/(a0*(1+x)) = (1/a0)*(1 - x + x^2 - ...)
    xf[0] = 1.0/a0;
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        xf[i] = -xf[i-1];

    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the square root of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceSquareRoot(const DACEDA *ina, DACEDA *inc)
{
    daceRoot(ina, 2, inc);
}

/*! Compute the inverse square root of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceInverseSquareRoot(const DACEDA *ina, DACEDA *inc)
{
    daceRoot(ina, -2, inc);
}

/*! Compute the cubic root of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceCubicRoot(const DACEDA *ina, DACEDA *inc)
{
    daceRoot(ina, 3, inc);
}

/*! Compute the inverse cubic root of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceInverseCubicRoot(const DACEDA *ina, DACEDA *inc)
{
    daceRoot(ina, -3, inc);
}

/*! Compute the hypotenuse sqrt(a^2 + b^2) of two DA objects.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] inb Pointer to the second DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina or inb.
*/
void daceHypotenuse(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    DACEDA itemp1, itemp2;
    daceAllocateDA(&itemp1, 0);
    daceAllocateDA(&itemp2, 0);

    daceSquare(ina, &itemp1);
    daceSquare(inb, &itemp2);
    daceAdd(&itemp1, &itemp2, inc);
    daceRoot(inc, 2, inc);

    daceFreeDA(&itemp2);
    daceFreeDA(&itemp1);
}

/*! Compute the exponential of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceExponential(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    // Taylor series of exp around the constant part: exp(a0)/i!
    xf[0] = exp(daceGetConstant(ina));
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        xf[i] = xf[i-1]/i;

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the natural logarithm of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogarithm(const DACEDA *ina, DACEDA *inc)
{
    // logarithm requires a strictly positive constant part
    const double a0 = daceGetConstant(ina);
    if(a0 <= 0)
    {
        daceSetError(__func__, DACE_ERROR, 47);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    daceDivideDouble(ina, a0, inc);
    // series of log(a0*(1+x)) = log(a0) + x - x^2/2 + x^3/3 - ...
    // NOTE(review): xf[1] is written unconditionally; assumes nocut >= 1 so the
    // buffer has at least two entries in the dynamic model — TODO confirm
    xf[0] = log(a0);
    xf[1] = 1.0;
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = -xf[i-1]/i*(i-1);
    }

    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the logarithm with respect to base b of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] b Base of the logarithm to use
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogarithmBase(const DACEDA *ina, const double b, DACEDA *inc)
{
    // the base must be positive
    if(b <= 0)
    {
        daceSetError(__func__, DACE_ERROR, 48);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // log_b(x) = log(x)/log(b)
    daceLogarithm(ina, inc);
    daceMultiplyDouble(inc, 1.0/log(b), inc);
}

/*! Compute the decadic logarithm of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogarithm10(const DACEDA *ina, DACEDA *inc)
{
    daceLogarithmBase(ina, 10.0, inc);
}

/*! Compute the binary logarithm of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceLogarithm2(const DACEDA *ina, DACEDA *inc)
{
    daceLogarithmBase(ina, 2.0, inc);
}

/*! Compute the sine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e.
inc can be the same as ina.
*/
void daceSine(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // Taylor series of sin around a0: coefficients repeat with period 4,
    // captured by the two-step recursion xf[i] = -xf[i-2]/(i*(i-1))
    xf[0] = sin(a0);
    xf[1] = cos(a0);
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = -xf[i-2]/(i*(i-1));
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the cosine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceCosine(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // Taylor series of cos around a0, same two-step recursion as daceSine
    xf[0] = cos(a0);
    xf[1] = -sin(a0);
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = -xf[i-2]/(i*(i-1));
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the tangent of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceTangent(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // tan is singular where cos of the constant part vanishes
    if(cos(daceGetConstant(ina)) == 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 49);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // tan(x) = sin(x)/cos(x)
    daceAllocateDA(&itemp, 0);
    daceSine(ina, &itemp);
    daceCosine(ina, inc);
    daceDivide(&itemp, inc, inc);
    daceFreeDA(&itemp);
}

/*! Compute the arcsine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcSine(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // asin is only expanded for |a0| < 1
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // asin(x) = atan(x/sqrt(1-x^2))
    daceAllocateDA(&itemp, 0);
    daceSquare(ina, &itemp);
    daceDoubleSubtract(&itemp, 1.0, &itemp);
    daceSquareRoot(&itemp, &itemp);
    daceDivide(ina, &itemp, inc);
    daceArcTangent(inc, inc);
    daceFreeDA(&itemp);
}

/*! Compute the arccosine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcCosine(const DACEDA *ina, DACEDA *inc)
{
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // acos(x) = pi/2 - asin(x)
    daceArcSine(ina, inc);
    daceDoubleSubtract(inc, M_PI_2, inc);
}

/*! Compute the arctangent of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcTangent(const DACEDA *ina, DACEDA *inc)
{
    DACEDA iarg;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1] = {0};
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // shift the expansion point to zero via the addition theorem:
    // atan(x) = atan(a0) + atan((x-a0)/(1+a0*x))
    daceAllocateDA(&iarg, 0);
    daceMultiplyDouble(ina, a0, &iarg);
    daceAddDouble(&iarg, 1.0, &iarg);
    daceSubtractDouble(ina, a0, inc);
    daceDivide(inc, &iarg, &iarg);

    // atan series around 0: x - x^3/3 + x^5/5 - ... (even coefficients stay 0)
    double s = 1.0;
    xf[0] = atan(a0);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i+=2)
    {
        xf[i] = s/i;
        s = -s;
    }

    daceEvaluateSeries(&iarg, xf, inc);
    daceFreeDA(&iarg);
#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Arctangent of ina/inb with proper sign in [-pi, pi]. This function follows
    the C standard atan2(y,x) function syntax.
\param[in] ina Pointer to the first DA object to operate on (y)
   \param[in] inb Pointer to the second DA object to operate on (x)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceArcTangent2(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    const double cx = daceGetConstant(inb);
    const double cy = daceGetConstant(ina);

    if(cx == 0.0 && cy == 0.0)
    {
        // both constant parts zero: result defined as 0
        daceCreateConstant(inc, 0.0);
    }
    else
    {
        if(fabs(cy) > fabs(cx))
        {
            // |y| > |x|: use atan2(y,x) = +/-pi/2 - atan(x/y) for accuracy
            daceDivide(inb, ina, inc);
            daceArcTangent(inc, inc);
            if(cy < 0.0)
            {
                daceDoubleSubtract(inc, -M_PI_2, inc);
            }
            else
            {
                daceDoubleSubtract(inc, M_PI_2, inc);
            }
        }
        else
        {
            // |x| >= |y|: atan(y/x), shifted by +/-pi in the left half plane
            daceDivide(ina, inb, inc);
            daceArcTangent(inc, inc);
            if(cx < 0.0)
            {
                // NOTE(review): for cy == 0, cx < 0 this yields -pi while C's
                // atan2(+0, x<0) returns +pi — confirm the intended convention
                if(cy > 0.0)
                {
                    daceAddDouble(inc, M_PI, inc);
                }
                else
                {
                    daceAddDouble(inc, -M_PI, inc);
                }
            }
        }
    }
}

/*! Compute the hyperbolic sine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/
void daceHyperbolicSine(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // Taylor series of sinh around a0: alternates sinh/cosh derivatives
    xf[0] = sinh(a0);
    xf[1] = cosh(a0);
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = xf[i-2]/(i*(i-1));
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the hyperbolic cosine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/ void daceHyperbolicCosine(const DACEDA *ina, DACEDA *inc) { #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif const double a0 = daceGetConstant(ina); xf[0] = cosh(a0); xf[1] = sinh(a0); for(unsigned int i = 2; i < DACECom_t.nocut+1; i++) { xf[i] = xf[i-2]/(i*(i-1)); } daceEvaluateSeries(ina, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the hyperbolic tangent of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicTangent(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; daceAllocateDA(&itemp, 0); daceHyperbolicSine(ina, &itemp); daceHyperbolicCosine(ina, inc); daceDivide(&itemp, inc, inc); daceFreeDA(&itemp); } /*! Compute the hyperbolic arcsince of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicArcSine(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; daceAllocateDA(&itemp, 0); daceSquare(ina, inc); daceAddDouble(inc, 1.0, &itemp); daceSquareRoot(&itemp, inc); daceAdd(ina, inc, &itemp); daceLogarithm(&itemp, inc); daceFreeDA(&itemp); } /*! Compute the hyperbolic arccosine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
*/ void daceHyperbolicArcCosine(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; if(daceGetConstant(ina) <= 1.0) { daceSetError(__func__, DACE_ERROR, 50); daceCreateConstant(inc, 0.0); return; } daceAllocateDA(&itemp, 0); daceSquare(ina, inc); daceSubtractDouble(inc, 1.0, &itemp); daceSquareRoot(&itemp, inc); daceAdd(ina, inc, &itemp); daceLogarithm(&itemp, inc); daceFreeDA(&itemp); } /*! Compute the hyperbolic arctangent of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicArcTangent(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; if(fabs(daceGetConstant(ina)) >= 1.0) { daceSetError(__func__, DACE_ERROR, 50); daceCreateConstant(inc, 0.0); return; } daceAllocateDA(&itemp, 0); daceAddDouble(ina, 1.0, &itemp); daceDoubleSubtract(ina, 1.0, inc); daceDivide(&itemp, inc, inc); daceLogarithm(inc, &itemp); daceMultiplyDouble(&itemp, 0.5, inc); daceFreeDA(&itemp); } /*! Compute the error function of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void daceErrorFunction(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);
    // erf'(x) = 2/sqrt(pi) * exp(-x^2); higher derivatives bring in Hermite
    // polynomials via the recursion below
    double factor = 2.0*exp(-a0*a0)/sqrt(M_PI);

    xf[0] = erf(a0);
    xf[1] = factor;
    double Hi2 = 1.0;       // Hermite polynomial H_{i-2} = H_0
    double Hi1 = 2.0*a0;    // Hermite polynomial H_{i-1} = H_1
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        // factor accumulates (-1)^(i-1) * 2/sqrt(pi) * exp(-a0^2) / i!
        factor /= -((double)i);
        xf[i] = factor*Hi1;
        const double temp = 2.0*a0*Hi1 - 2.0*(i-1)*Hi2;    // recursion relation: H_i = 2*x*H_{i-1} - 2*(i-1)*H_{i-2}
        Hi2 = Hi1;
        Hi1 = temp;
    }

    daceEvaluateSeries(ina, xf, inc);
#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the complementary error function of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceComplementaryErrorFunction(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);
    // erfc = 1 - erf, so the derivative series is that of erf with
    // opposite sign
    double factor = -2.0*exp(-a0*a0)/sqrt(M_PI);

    xf[0] = erfc(a0);
    xf[1] = factor;
    double Hi2 = 1.0;       // Hermite polynomial H_{i-2} = H_0
    double Hi1 = 2.0*a0;    // Hermite polynomial H_{i-1} = H_1
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        factor /= -((double)i);
        xf[i] = factor*Hi1;
        const double temp = 2.0*a0*Hi1 - 2.0*(i-1)*Hi2;    // recursion relation: H_i = 2*x*H_{i-1} - 2*(i-1)*H_{i-2}
        Hi2 = Hi1;
        Hi1 = temp;
    }

    daceEvaluateSeries(ina, xf, inc);
#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/// @cond
// Wrappers for contributed netlib Bessel functions (not for public use)

/*! Compute value of Bessel functions J_n, Y_n for n in [n0, n1].
   \param[in] x function argument (non-negative)
   \param[in] n0 Lowest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] n1 Highest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] type Type of function to evaluate:
        -1: Bessel J function
         1: Bessel Y function
   \param[out] bz Array of size n1-n0+1 containing the values of B_{n0}, B_{n0+1}, ..., B_{n1}
   \return Returns 0 if all values are calculated accurately, -1 if x is too large
    to calculate the result or another error occured, or +1 if some of the results
    are of reduced accuracy.
 */
int BesselWrapper(const double x, const int n0, const int n1, const int type, double *bz)
{
    // netlib routines compute orders 0..nb-1, so nb must cover max(|n0|,|n1|)
    long int nb = (abs(n0) > abs(n1) ? abs(n0) : abs(n1))+1, ncalc;
    double xx = x, alpha = 0.0;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
#define DACE_STATIC_MAX_BESSEL_ORDER 100
    if( DACE_STATIC_MAX_BESSEL_ORDER < nb ) return -1;
    double b[DACE_STATIC_MAX_BESSEL_ORDER];
#else
    double* b = (double*) dacecalloc(nb, sizeof(double));
#endif

    if(type < 0)
        rjbesl_(&xx, &alpha, &nb, b, &ncalc);
    else
        rybesl_(&xx, &alpha, &nb, b, &ncalc);

    // discombobulate results
    if(ncalc >= 0)
    {
        // ncalc == nb means every order converged; anything less means
        // reduced accuracy in the tail
        ncalc = (ncalc == nb ? 0 : 1);
        // s tracks (-1)^|i| for the negative-order reflection below
        double s = (n0%2 == 0 ? 1.0 : -1.0);
        for(int i = n0; i <= n1; i++)
        {
            if(i >= 0)
                *(bz++) = b[i];
            else
            {
                *(bz++) = s*b[-i];    // for integer orders considered here, (-1)^n J_n = J_{-n}, and (-1)^n Y_n = Y_{-n}
                s *= -1.0;
            }
        }
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(b);
#endif

    return ncalc < 0 ? -1 : ncalc;
}

/*! Compute value of modified Bessel functions I_n, K_n for n in [n0, n1].
   \param[in] x function argument (non-negative)
   \param[in] n0 Lowest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] n1 Highest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] type Type of function to evaluate:
        -2: Bessel I function, scaled (i.e. exp(-x)*I_n(x))
        -1: Bessel I function
         1: Bessel K function
         2: Bessel K function, scaled (i.e. 
exp(x)*K_n(x))
   \param[out] bz Array of size n1-n0+1 containing the values of B_{n0}, B_{n0+1}, ..., B_{n1}
   \return Returns 0 if all values are calculated accurately, -1 if x is too large
    to calculate the result or another error occured, or +1 if some of the results
    are of reduced accuracy.
 */
int ModifiedBesselWrapper(const double x, const int n0, const int n1, const int type, double *bz)
{
    // netlib routines compute orders 0..nb-1; ize selects plain (1) or
    // exponentially scaled (2) variants, encoded in |type|
    long int nb = (abs(n0) > abs(n1) ? abs(n0) : abs(n1))+1, ize = abs(type), ncalc;
    double xx = x, alpha = 0.0;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
#define DACE_STATIC_MAX_BESSEL_ORDER 100
    if( DACE_STATIC_MAX_BESSEL_ORDER < nb ) return -1;
    double b[DACE_STATIC_MAX_BESSEL_ORDER];
#else
    double* b = (double*) dacecalloc(nb, sizeof(double));
#endif

    if(type < 0)
        ribesl_(&xx, &alpha, &nb, &ize, b, &ncalc);
    else
        rkbesl_(&xx, &alpha, &nb, &ize, b, &ncalc);

    // discombobulate results
    if(ncalc >= 0)
    {
        // ncalc == nb means every order converged; anything less means
        // reduced accuracy in the tail
        ncalc = (ncalc == nb ? 0 : 1);
        for(int i = n0; i <= n1; i++)
            *(bz++) = b[abs(i)];    // for integer orders considered here, I_n = I_{-n}, and for all orders K_n = K_{-n}
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(b);
#endif

    return ncalc < 0 ? -1 : ncalc;
}
/// @endcond

/*! Compute the modified Bessel function I_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[in] scaled If true, the scaled Bessel function is computed (i.e. exp(-x)*I_n(x))
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselIFunction(const DACEDA *ina, const int n, const bool scaled, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // NOTE(review): rejects a0 == 0 as well, although the doc only demands
    // constant part >= 0 — presumably because the netlib routines require
    // strictly positive arguments; confirm intended domain
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif

    // need orders n-nocut .. n+nocut to build the derivative series
    const int res = ModifiedBesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, scaled ? -2 : -1, bz);
    if(res >= 0)
    {
        if(scaled)
            daceEvaluateScaledModifiedBesselFunction(ina, bz, 1.0, inc);
        else
            daceEvaluateBesselFunction(ina, bz, 1.0, 1.0, inc);
    }
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*! Compute the modified Bessel function K_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[in] scaled If true, the scaled Bessel function is computed (i.e. exp(x)*K_n(x))
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselKFunction(const DACEDA *ina, const int n, const bool scaled, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // K_n has a singularity at 0, so the constant part must be positive
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif

    // need orders n-nocut .. n+nocut to build the derivative series
    const int res = ModifiedBesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, scaled ? 2 : 1, bz);
    if(res >= 0)
    {
        if(scaled)
            daceEvaluateScaledModifiedBesselFunction(ina, bz, -1.0, inc);
        else
            daceEvaluateBesselFunction(ina, bz, 1.0, -1.0, inc);
    }
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*!
Compute the Bessel function J_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselJFunction(const DACEDA *ina, const int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // NOTE(review): rejects a0 == 0 as well, although the doc only demands
    // constant part >= 0 — confirm intended domain against the netlib routines
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif

    // need orders n-nocut .. n+nocut to build the derivative series
    const int res = BesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, -1, bz);
    if(res >= 0)
        daceEvaluateBesselFunction(ina, bz, -1.0, 1.0, inc);
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*! Compute the Bessel function Y_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part >= 0)
   \param[in] n Order of the Bessel function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselYFunction(const DACEDA *ina, const int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // Y_n is singular at 0, so the constant part must be positive
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif

    // need orders n-nocut .. n+nocut to build the derivative series
    const int res = BesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, 1, bz);
    if(res >= 0)
        daceEvaluateBesselFunction(ina, bz, -1.0, 1.0, inc);
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*!
Evaluate a Bessel function with coefficients bz with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] bz C array of 2*nocut+1 elements containing Bessel functions of
    orders n-nocut, ..., n+nocut
   \param[in] type Either -1.0 for normal Bessel functions, or +1.0 for modified Bessel functions.
   \param[in] ktype Either -1.0 for modified Bessel K function, or +1.0 for all other Bessel functions.
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceEvaluateBesselFunction(const DACEDA *ina, const double bz[], const double type, const double ktype, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
    double binomial[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
    double* binomial = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // xf[i] holds the i-th Taylor coefficient of the Bessel function around
    // the constant part; bz[nocut] is the function value itself
    xf[0] = bz[DACECom_t.nocut];
    binomial[0] = 1.0;
    double factor = 1.0;    // accumulates (ktype/2)^i / i!
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        factor *= ktype*0.5/i;
        // calculate binomial coefficients i choose j based on previously calculated i-1 choose j.
        binomial[i] = 1.0;
        for(unsigned int j = i-1; j > 0; j--)
            binomial[j] += binomial[j-1];

        // Calculate n-th derivative of Bessel function C, see http://dlmf.nist.gov/10.6
        // bz contains values of C_{n-o} to C_{n+o} of constant part of ina
        double sign = 1.0, c = 0.0;
        xf[i] = 0.0;
        for(unsigned int j = 0; j <= i; j++)
        {
            // use Kahan summation, since signs oscillate and magnitudes can also vary greatly
            const double y = binomial[j]*sign*bz[DACECom_t.nocut-i+2*j] - c;
            const double t = xf[i] + y;
            c = (t - xf[i]) - y;
            xf[i] = t;
            // in infinite precision the above is equivalent to:
            // xf[i] += binomial[j]*sign*bz[DACECom_t.nocut-i+2*j];
            sign *= type;
        }
        xf[i] *= factor;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(binomial);
    dacefree(xf);
#endif
}

/*! Evaluate a scaled modified Bessel function with coefficients bz with the
    non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] bz C array of 2*nocut+1 elements containing modified Bessel
    functions of orders n-nocut, ..., n+nocut
   \param[in] ktype Either -1.0 for scaled Bessel K function, or +1.0 for scaled Bessel I function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceEvaluateScaledModifiedBesselFunction(const DACEDA *ina, const double bz[], const double ktype, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
    double binomial[2*DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
    double* binomial = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif

    // xf[i] holds the i-th Taylor coefficient of the scaled Bessel function
    // around the constant part; the scaled recursion uses binomials up to 2*i
    xf[0] = bz[DACECom_t.nocut];
    binomial[0] = 1.0;
    double factor = 1.0;    // accumulates (ktype/2)^i / i!
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        factor *= ktype*0.5/i;
        // calculate binomial coefficients 2*i-1 choose j based on previously calculated 2*i-2 choose j.
        binomial[2*i-1] = 1.0;
        for(unsigned int j = 2*i-2; j > 0; j--)
            binomial[j] += binomial[j-1];
        // calculate binomial coefficients 2*i choose j based on previously calculated 2*i-1 choose j.
        binomial[2*i] = 1.0;
        for(unsigned int j = 2*i-1; j > 0; j--)
            binomial[j] += binomial[j-1];

        // Calculate n-th derivative of Bessel function C
        // bz contains values of C_{n-o} to C_{n+o} of constant part of ina
        double sign = 1.0, c = 0.0;
        xf[i] = 0.0;
        for(unsigned int j = 0; j <= 2*i; j++)
        {
            // use Kahan summation, since signs oscillate and magnitudes can also vary greatly
            const double y = binomial[j]*sign*bz[DACECom_t.nocut-i+j] - c;
            const double t = xf[i] + y;
            c = (t - xf[i]) - y;
            xf[i] = t;
            // in infinite precision the above is equivalent to:
            // xf[i] += binomial[j]*sign*bz[DACECom_t.nocut-i+j];
            sign *= -1.0;
        }
        xf[i] *= factor;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(binomial);
    dacefree(xf);
#endif
}

/*! Compute the partial Logarithmic Gamma function of a DA object (without constant part).
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[in] a0 Constant part of ina, precomputed by the caller
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
   \note No argument checking is performed to ensure values are within allowable range.
 */
void daceLogGammaFunction0(const DACEDA *ina, const double a0, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // Taylor series of log Gamma around a0 without the constant term:
    // first derivative is psi(a0), higher ones involve the Hurwitz zeta
    xf[0] = 0.0;
    xf[1] = psi_(&a0);
    double s = 1.0;
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = (s/i)*zeta_(i, a0, NULL);
        s *= -1.0;
    }

    daceEvaluateSeries(ina, xf, inc);
#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the Logarithmic Gamma function of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceLogGammaFunction(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // reject non-positive integer constant parts (poles of Gamma)
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    daceLogGammaFunction0(ina, a0, inc);
    // NOTE(review): for negative non-integer a0, dgamma_ can be negative and
    // log() then yields NaN — confirm whether callers restrict a0 > 0
    daceSetCoefficient0(inc, 0, log(dgamma_(&a0)));
}

/*! Compute the Gamma function of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceGammaFunction(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // reject non-positive integer constant parts (poles of Gamma)
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // Gamma(x) = Gamma(a0) * exp(logGamma(x) - logGamma(a0)); the partial
    // log Gamma has zero constant part, so exponentiate and rescale
    daceLogGammaFunction0(ina, a0, inc);
    daceExponential(inc, inc);
    daceMultiplyDouble(inc, dgamma_(&a0), inc);
}

/*! Compute the n-th Psi function (i.e. the n+1 derivative of the logarithmic
    gamma function) of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[in] n Order of the Psi function (n >= 0)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void dacePsiFunction(const DACEDA *ina, const unsigned int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // reject non-positive integer constant parts (poles of psi)
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    if(n == 0)
    {
        // digamma: derivatives are alternating Hurwitz zeta values
        xf[0] = psi_(&a0);
        double s = 1.0;
        for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        {
            xf[i] = s*zeta_(i+1, a0, NULL);
            s *= -1.0;
        }
    }
    else
    {
        // polygamma of order n: start with (-1)^(n+1) * n!
        double fac = (n%2 ? 
1.0 : -1.0);
        for(unsigned int i = 2; i <= n; i++)
            fac *= i;
        for(unsigned int i = 0; i < DACECom_t.nocut+1; i++)
        {
            xf[i] = fac*zeta_(n+i+1, a0, NULL);
            // advance prefactor: divide by (i+1)! growth, multiply next order
            fac = -(fac/(i+1))*(n+i+1);
        }
    }

    daceEvaluateSeries(ina, xf, inc);
#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Evaluate a polynomial with coefficients xf with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] xf C array of nocut+1 elements containing the coefficients of the polynomial
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceEvaluateSeries(const DACEDA *ina, const double xf[], DACEDA *inc)
{
    DACEDA inon;
    const unsigned int nocut = DACECom_t.nocut;

    // inon = non-constant (nilpotent) part of ina; safe even if inc == ina
    // because ina is copied before inc is written
    // NOTE(review): xf[nocut-1] assumes nocut >= 1 — verify DA setup
    daceAllocateDA(&inon, 0);
    daceCopy(ina, &inon);
    daceSetCoefficient0(&inon, 0, 0.0);

    // Horner's scheme; the thread-local truncation order is raised one step
    // per multiplication because each product with inon raises the minimum
    // order of the accumulated terms
    DACECom_t.nocut = 1;
    daceMultiplyDouble(&inon, xf[nocut], inc);
    daceAddDouble(inc, xf[nocut-1], inc);

    // evaluate series
    for(int i = nocut-2; i >= 0; i--)
    {
        DACECom_t.nocut = nocut-i;
        daceMultiply(&inon, inc, inc);
        daceAddDouble(inc, xf[i], inc);
    }

    // restore the caller's truncation order
    DACECom_t.nocut = nocut;
    daceFreeDA(&inon);
}

/*! Compute the weighted sum of two DA objects.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] afac Weighting factor to multiply ina by
   \param[in] inb Pointer to the second DA object to operate on
   \param[in] bfac Weighting factor to multiply inb by
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is NOT aliasing safe! So inc MUST BE DIFFERENT from ina and inb.
 */
void daceWeightedSum(const DACEDA *ina, const double afac, const DACEDA *inb, const double bfac, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // ia/ib walk the sorted monomial lists of the inputs, ic fills the output;
    // icmax is bounded by the output's allocated length ilmc
    monomial *ia = ipoa, *ib = ipob, *ic = ipoc;
    monomial *const iamax = ipoa+illa, *const ibmax = ipob+illb, *const icmax = ipoc+ilmc;

    if(illa > 0 && illb > 0)
    {
        // both polynomials have coefficients, merge until one runs out
        unsigned int ja = ia->ii;
        unsigned int jb = ib->ii;
        while(true)
        {
            if(ja == jb)
            {
                // add the two terms
                // skip monomials above the truncation order and drop results
                // below the cutoff threshold eps
                if(DACECom.ieo[ja] <= DACECom_t.nocut)
                {
                    const double ccc = ia->cc*afac + ib->cc*bfac;
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            // output too short to hold all terms
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ia->ii;
                        ic++;
                    }
                }
                ia++;
                ib++;
                if(ia >= iamax || ib >= ibmax) break;
                ja = ia->ii;
                jb = ib->ii;
            }
            else if(ja < jb)
            {
                // store term a
                if(DACECom.ieo[ja] <= DACECom_t.nocut)
                {
                    const double ccc = ia->cc*afac;
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ia->ii;
                        ic++;
                    }
                }
                ia++;
                if(ia >= iamax) break;
                ja = ia->ii;
            }
            else
            {
                // store term b
                if(DACECom.ieo[jb] <= DACECom_t.nocut)
                {
                    const double ccc = ib->cc*bfac;
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ib->ii;
                        ic++;
                    }
                }
                ib++;
                if(ib >= ibmax) break;
                jb = ib->ii;
            }
        }
    }

    // copy any remaining terms from either ina or inb
    // (at most one of the two ranges below is non-empty)
    monomial *ismin, *ismax;
    double fac;
    if(ia < iamax)
    {
        ismin = ia;
        ismax = iamax;
        fac = afac;
    }
    else
    {
        ismin = ib;
        ismax = ibmax;
        fac = bfac;
    }
    for(monomial *is = ismin; is < ismax; is++)
    {
        if(DACECom.ieo[is->ii] <= DACECom_t.nocut)
        {
            const double ccc = is->cc*fac;
            if(fabs(ccc) >= DACECom_t.eps)
            {
                if(ic >= icmax)
                {
                    // output too short to hold all terms
                    daceSetError(__func__, DACE_ERROR, 21);
                    daceSetLength(inc, ilmc);
                    return;
                }
                ic->cc = ccc;
                ic->ii = is->ii;
                ic++;
            }
        }
    }

    // record the number of monomials actually written
    daceSetLength(inc, ic-ipoc);
}
/** @}*/
utils.h
#pragma once #include <fcntl.h> #include <algorithm> #include <cassert> #include <cstdlib> #include <cstring> #include <fstream> #include <iostream> #include <memory> #include <random> #include <set> #ifdef __APPLE__ #else #include <malloc.h> #endif #ifdef _WINDOWS #include <Windows.h> typedef HANDLE FileHandle; #else #include <unistd.h> typedef int FileHandle; #endif #include "cached_io.h" #include "common_includes.h" #include "windows_customizations.h" #include "aligned_dtor.h" //#include "pq_flash_index.h" // taken from // https://github.com/Microsoft/BLAS-on-flash/blob/master/include/utils.h // round up X to the nearest multiple of Y #define ROUND_UP(X, Y) \ ((((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) * (Y)) #define DIV_ROUND_UP(X, Y) (((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) // round down X to the nearest multiple of Y #define ROUND_DOWN(X, Y) (((uint64_t)(X) / (Y)) * (Y)) // alignment tests #define IS_ALIGNED(X, Y) ((uint64_t)(X) % (uint64_t)(Y) == 0) #define IS_512_ALIGNED(X) IS_ALIGNED(X, 512) #define IS_4096_ALIGNED(X) IS_ALIGNED(X, 4096) typedef uint64_t _u64; typedef int64_t _s64; typedef uint32_t _u32; typedef int32_t _s32; typedef uint16_t _u16; typedef int16_t _s16; typedef uint8_t _u8; typedef int8_t _s8; namespace diskann { enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 }; DISKANN_DLLEXPORT float calc_recall_set_tags( unsigned num_queries, unsigned *gold_std, unsigned dim_gs, unsigned *our_results_tags, unsigned dim_or, unsigned recall_at, unsigned subset_size, std::string gt_tag_filename, std::string current_tag_filename); inline void alloc_aligned(void **ptr, size_t size, size_t align) { *ptr = nullptr; assert(IS_ALIGNED(size, align)); #ifndef _WINDOWS *ptr = ::aligned_alloc(align, size); #else *ptr = ::_aligned_malloc(size, align); // note the swapped arguments! 
#endif
    assert(*ptr != nullptr);
  }

  // Interactive debugging aid: print a message and block until the user
  // types an integer.
  inline void check_stop(std::string arnd) {
    int brnd;
    std::cout << arnd << std::endl;
    std::cin >> brnd;
  }

  // Release memory obtained from alloc_aligned().
  inline void aligned_free(void *ptr) {
    // Gopal. Must have a check here if the pointer was actually allocated by
    // _alloc_aligned
    if (ptr == nullptr) {
      return;
    }
#ifndef _WINDOWS
    free(ptr);
#else
    ::_aligned_free(ptr);
#endif
  }

  // Fill addr[0..size) with `size` distinct values in [0, N), sorted order
  // modulo a random rotation.
  // NOTE(review): assumes size < N — `rng() % (N - size)` divides by zero
  // when size == N, and the de-duplication pass can exceed N-1 when size is
  // close to N; verify call sites.
  inline void GenRandom(std::mt19937 &rng, unsigned *addr, unsigned size,
                        unsigned N) {
    for (unsigned i = 0; i < size; ++i) {
      addr[i] = rng() % (N - size);
    }
    std::sort(addr, addr + size);
    // bump duplicates upward; works because values were drawn from a range
    // `size` smaller than N
    for (unsigned i = 1; i < size; ++i) {
      if (addr[i] <= addr[i - 1]) {
        addr[i] = addr[i - 1] + 1;
      }
    }
    unsigned off = rng() % N;
    for (unsigned i = 0; i < size; ++i) {
      addr[i] = (addr[i] + off) % N;
    }
  }

  // Read only the leading (nrows, ncols) header of a DiskANN .bin file.
  // NOTE(review): no check that the file opened or the reads succeeded.
  inline void get_bin_metadata(const std::string &bin_file, size_t &nrows,
                               size_t &ncols) {
    std::ifstream reader(bin_file.c_str(), std::ios::binary);
    uint32_t nrows_32, ncols_32;
    reader.read((char *) &nrows_32, sizeof(uint32_t));
    reader.read((char *) &ncols_32, sizeof(uint32_t));
    nrows = nrows_32;
    ncols = ncols_32;
    reader.close();
  }

  // Load a .bin file (int32 npts, int32 dim, then npts*dim T's) into a
  // newly allocated row-major array.  Caller owns `data` (delete[]).
  // Throws ANNException on file-size mismatch.
  template<typename T>
  inline void load_bin(const std::string &bin_file, T *&data, size_t &npts,
                       size_t &dim) {
    _u64            read_blk_size = 64 * 1024 * 1024;
    cached_ifstream reader(bin_file, read_blk_size);
    // std::cout << "Reading bin file " << bin_file.c_str() << " ..." <<
    // std::endl;
    size_t actual_file_size = reader.get_file_size();

    int npts_i32, dim_i32;
    reader.read((char *) &npts_i32, sizeof(int));
    reader.read((char *) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    dim = (unsigned) dim_i32;
    // std::cout << "Metadata: #pts = " << npts << ", #dims = " << dim <<
    // "..."
    //           << std::endl;

    size_t expected_actual_file_size =
        npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
    if (actual_file_size != expected_actual_file_size) {
      std::stringstream stream;
      stream << "Error. File size mismatch. 
Actual size is " << actual_file_size
             << " while expected size is " << expected_actual_file_size
             << " npts = " << npts << " dim = " << dim
             << " size of <T>= " << sizeof(T) << std::endl;
      std::cout << stream.str();
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    data = new T[npts * dim];
    reader.read((char *) data, npts * dim * sizeof(T));
    // std::cout << "Finished reading bin file." << std::endl;
  }

  // Load a ground-truth file: npts*dim uint32 ids, npts*dim float distances,
  // and optionally npts*dim uint32 tags (detected from the file size).
  // Caller owns the returned arrays (delete[]).
  inline void load_truthset(const std::string &bin_file, uint32_t *&ids,
                            float *&dists, size_t &npts, size_t &dim,
                            uint32_t **tags = nullptr) {
    _u64            read_blk_size = 64 * 1024 * 1024;
    cached_ifstream reader(bin_file, read_blk_size);
    // std::cout << "Reading truthset file " << bin_file.c_str() << " ..."
    //           << std::endl;
    size_t actual_file_size = reader.get_file_size();

    int npts_i32, dim_i32;
    reader.read((char *) &npts_i32, sizeof(int));
    reader.read((char *) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    dim = (unsigned) dim_i32;
    // std::cout << "Metadata: #pts = " << npts << ", #dims = " << dim <<
    // "..."
    //           << std::endl;

    // two layouts: ids+dists only, or ids+dists+tags (one extra uint32 block)
    size_t expected_actual_file_size =
        2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
    size_t with_tags_actual_file_size =
        3 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
    if (actual_file_size != expected_actual_file_size &&
        actual_file_size != with_tags_actual_file_size) {
      std::stringstream stream;
      stream << "Error. File size mismatch. 
Actual size is " << actual_file_size
             << " while expected size is " << expected_actual_file_size
             << " npts = " << npts << " dim = " << dim << std::endl;
      std::cout << stream.str();
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    ids = new uint32_t[npts * dim];
    reader.read((char *) ids, npts * dim * sizeof(uint32_t));
    dists = new float[npts * dim];
    reader.read((char *) dists, npts * dim * sizeof(float));
    if (actual_file_size == with_tags_actual_file_size) {
      // std::cout << "Loading tags for groundtruth.\n";
      // NOTE(review): tags is dereferenced unconditionally here; a caller
      // relying on the default tags = nullptr crashes if the file carries
      // tags — verify call sites.
      *tags = new uint32_t[npts * dim];
      reader.read((char *) *tags, npts * dim * sizeof(uint32_t));
    }
  }

  // unique_ptr convenience overload of load_bin(); transfers ownership of
  // the freshly allocated array to `data`.
  template<typename T>
  inline void load_bin(const std::string &bin_file, std::unique_ptr<T[]> &data,
                       size_t &npts, size_t &dim) {
    T *ptr;
    load_bin<T>(bin_file, ptr, npts, dim);
    data.reset(ptr);
  }

  // Write a .bin file: int32 npts, int32 ndims, then npts*ndims T's.
  // NOTE(review): write errors are not checked.
  template<typename T>
  inline void save_bin(const std::string &filename, T *data, size_t npts,
                       size_t ndims) {
    std::ofstream writer(filename, std::ios::binary | std::ios::out);
    // std::cout << "Writing bin: " << filename.c_str() << "\n";
    int npts_i32 = (int) npts, ndims_i32 = (int) ndims;
    writer.write((char *) &npts_i32, sizeof(int));
    writer.write((char *) &ndims_i32, sizeof(int));
    // std::cout << "bin: #pts = " << npts << ", #dims = " << ndims
    //           << ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int)
    //           << "B" << std::endl;

    //    data = new T[npts_u64 * ndims_u64];
    writer.write((char *) data, npts * ndims * sizeof(T));
    writer.close();
    // std::cout << "Finished writing bin."
<< std::endl;
  }

  // Write only the first `ndims` entries of each `aligned_dim`-wide row,
  // producing a tightly packed .bin file from aligned in-memory data.
  template<typename T>
  inline void save_unaligned_bin(const std::string &filename, T *data,
                                 size_t npts, size_t ndims,
                                 size_t aligned_dim) {
    std::ofstream writer(filename, std::ios::binary | std::ios::out);
    // std::cout << "Writing unaligned bin : " << filename.c_str() << "\n";
    int npts_i32 = (int) npts, ndims_i32 = (int) ndims;
    writer.write((char *) &npts_i32, sizeof(int));
    writer.write((char *) &ndims_i32, sizeof(int));
    // std::cout << "bin: #pts = " << npts << ", #dims = " << ndims
    //           << ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int)
    //           << "B" << std::endl;

    //    data = new T[npts_u64 * ndims_u64];
    for (size_t i = 0; i < npts; i++) {
      // drop the padding tail of each aligned row
      writer.write((char *) (data + i * aligned_dim), ndims * sizeof(T));
    }
    writer.close();
    // std::cout << "Finished writing bin." << std::endl;
  }

  // Read a .bin file into caller-provided memory laid out with rows of
  // width `rounded_dim`, zero-padding each row past `dim`.
  template<typename T>
  inline void load_aligned_data(const std::string bin_file, T *&data,
                                size_t &npts, size_t &dim,
                                const size_t &rounded_dim) {
    if (data == nullptr) {
      std::cout << "Memory was not allocated for " << data
                << " before calling the load function. Exiting..."
                << std::endl;
      exit(-1);
    }
    _u64            read_blk_size = 64 * 1024 * 1024;
    cached_ifstream reader(bin_file, read_blk_size);
    // std::cout << "Reading bin file " << bin_file << " ..." << std::flush;
    size_t actual_file_size = reader.get_file_size();

    int npts_i32, dim_i32;
    reader.read((char *) &npts_i32, sizeof(int));
    reader.read((char *) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    dim = (unsigned) dim_i32;

    size_t expected_actual_file_size =
        npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
    if (actual_file_size != expected_actual_file_size) {
      std::stringstream stream;
      stream << "Error. File size mismatch. 
Actual size is " << actual_file_size
             << " while expected size is " << expected_actual_file_size
             << " npts = " << npts << " dim = " << dim
             << " size of <T>= " << sizeof(T) << std::endl;
      std::cout << stream.str() << std::endl;
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    for (size_t i = 0; i < npts; i++) {
      reader.read((char *) (data + i * rounded_dim), dim * sizeof(T));
      // zero the padding tail of each row
      memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T));
    }
  }

  // Load a .bin file into freshly allocated aligned memory with rows padded
  // to rounded_dim = ROUND_UP(dim, 8).  Caller owns `data` (aligned_free).
  // Throws ANNException on file-size mismatch.
  template<typename T>
  inline void load_aligned_bin(const std::string bin_file, T *&data,
                               size_t &npts, size_t &dim,
                               size_t &rounded_dim) {
    _u64            read_blk_size = 64 * 1024 * 1024;
    cached_ifstream reader(bin_file, read_blk_size);
    // std::cout << "Reading bin file " << bin_file << " ..." << std::flush;
    size_t actual_file_size = reader.get_file_size();

    int npts_i32, dim_i32;
    reader.read((char *) &npts_i32, sizeof(int));
    reader.read((char *) &dim_i32, sizeof(int));
    npts = (unsigned) npts_i32;
    dim = (unsigned) dim_i32;

    size_t expected_actual_file_size =
        npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
    if (actual_file_size != expected_actual_file_size) {
      std::stringstream stream;
      stream << "Error. File size mismatch. Actual size is "
             << actual_file_size << " while expected size is "
             << expected_actual_file_size << " npts = " << npts
             << " dim = " << dim << " size of <T>= " << sizeof(T) << std::endl;
      std::cout << stream.str() << std::endl;
      throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
                                  __LINE__);
    }
    rounded_dim = ROUND_UP(dim, 8);
    size_t allocSize = npts * rounded_dim * sizeof(T);
    alloc_aligned(((void **) &data), allocSize, 8 * sizeof(T));
    // std::cout << "done. Copying data..." << std::flush;

    for (size_t i = 0; i < npts; i++) {
      reader.read((char *) (data + i * rounded_dim), dim * sizeof(T));
      // zero the padding tail of each row
      memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T));
    }
    // std::cout << " done."
<< std::endl; } template<typename T> inline void load_aligned_bin(const std::string & bin_file, std::unique_ptr<T[]> &data, size_t &npts, size_t &dim, size_t &rounded_dim) { T *ptr; load_aligned_bin(bin_file, ptr, npts, dim, rounded_dim); data.reset(ptr); } template<typename InType, typename OutType> void convert_types(const InType *srcmat, OutType *destmat, size_t npts, size_t dim) { #pragma omp parallel for schedule(static, 65536) for (int64_t i = 0; i < (_s64) npts; i++) { for (uint64_t j = 0; j < dim; j++) { destmat[i * dim + j] = (OutType) srcmat[i * dim + j]; } } } /********* templated load functions *********/ // template<typename T> // void load_Tvecs(const char *filename, T *&data, size_t &num, size_t &dim) // { // // check validity of file // std::ifstream in(filename, std::ios::binary | std::ios::ate); // if (!in.is_open()) { // std::cout << "Error opening file: " << filename << std::endl; // exit(-1); // } // _u64 fsize = in.tellg(); // in.seekg(0, std::ios::beg); // _u32 dim_u32; // in.read((char *) &dim_u32, sizeof(unsigned)); // in.close(); // dim = dim_u32; // // _u64 ndims = (_u64) dim; // _u64 disk_vec_size = ndims * sizeof(T) + sizeof(unsigned); // _u64 mem_vec_size = ndims * sizeof(T); // _u64 npts = fsize / disk_vec_size; // num = npts; // std::cout << "Tvecs: " << filename << ", npts: " << npts // << ", ndims: " << ndims << "\n"; // // allocate memory // data = new T[npts * ndims]; // // cached_ifstream reader(std::string(filename), 256 * 1024 * 1024); // unsigned dummy_ndims; // for (_u64 i = 0; i < npts; i++) { // T *cur_vec = data + (i * ndims); // // read and ignore dummy ndims // reader.read((char *) &dummy_ndims, sizeof(unsigned)); // // // read vec // reader.read((char *) cur_vec, mem_vec_size); // } // return; // } // // // each row in returned matrix is aligned to 32-byte boundary // template<typename T> // inline void aligned_load_Tvecs(char *filename, T *&data, unsigned &num, // unsigned &dim) { // // check validity of file // 
std::ifstream in(filename, std::ios::binary); // if (!in.is_open()) { // std::cout << "Error opening file: " << filename << std::endl; // exit(-1); // } // // in.read((char *) &dim, sizeof(unsigned)); // in.seekg(0, std::ios::end); // std::ios::pos_type ss = in.tellg(); // in.close(); // // // calculate vector size // size_t fsize = (size_t) ss; // size_t per_row = sizeof(unsigned) + dim * sizeof(T); // num = fsize / per_row; // std::cout << "# points = " << num << ", original dimension = " << dim // << std::endl; // // // create aligned buf // unsigned aligned_dim = ROUND_UP(dim, 8); // std::cout << "Aligned dimesion = " << aligned_dim << std::endl; // // // data = new T[(size_t) num * (size_t) dim]; // alloc_aligned((void **) &data, // (size_t) num * (size_t) aligned_dim * sizeof(T), 32); // // memset((void *) data, 0, (size_t) num * (size_t) aligned_dim * // sizeof(T)); // // // open classical fd // FileHandle fd; //#ifndef _WINDOWS // fd = open(filename, O_RDONLY); // assert(fd != -1); //#else // fd = CreateFileA(filename, GENERIC_READ, 0, nullptr, OPEN_EXISTING, // FILE_FLAG_RANDOM_ACCESS, nullptr); //#endif // // // parallel read each vector at the desired offset // // #pragma omp parallel for schedule(static, 32768) // for (size_t i = 0; i < num; i++) { // // computed using actual dimension // uint64_t file_offset = (per_row * i) + sizeof(unsigned); // // computed using aligned dimension // T *buf = data + i * aligned_dim; // //#ifndef _WINDOWS // int ret = -1; // ret = pread(fd, (char *) buf, dim * sizeof(T), file_offset); //#else // DWORD ret = -1; // OVERLAPPED overlapped; // memset(&overlapped, 0, sizeof(overlapped)); // overlapped.OffsetHigh = // (uint32_t)((file_offset & 0xFFFFFFFF00000000LL) >> 32); // overlapped.Offset = (uint32_t)(file_offset & 0xFFFFFFFFLL); // if (!ReadFile(fd, (LPVOID) buf, dim * sizeof(T), &ret, &overlapped)) { // std::cout << "Read file returned error: " << GetLastError() // << std::endl; // } //#endif // // // std::cout << 
"ret = " << ret << "\n"; // if (ret != dim * sizeof(T)) { // std::cout << "read=" << ret << ", expected=" << dim * sizeof(T); // assert(ret == dim * sizeof(T)); // } // } // std::cout << "Finished reading Tvecs" << std::endl; // //#ifndef _WINDOWS // close(fd); //#else // CloseHandle(fd); //#endif // } // plain saves data as npts X ndims array into filename template<typename T> void save_Tvecs(const char *filename, T *data, size_t npts, size_t ndims) { std::string fname(filename); // create cached ofstream with 64MB cache cached_ofstream writer(fname, 64 * 1048576); unsigned dims_u32 = (unsigned) ndims; // start writing for (uint64_t i = 0; i < npts; i++) { // write dims in u32 writer.write((char *) &dims_u32, sizeof(unsigned)); // get cur point in data T *cur_pt = data + i * ndims; writer.write((char *) cur_pt, ndims * sizeof(T)); } } // NOTE :: good efficiency when total_vec_size is integral multiple of 64 inline void prefetch_vector(const char *vec, size_t vecsize) { size_t max_prefetch_size = (vecsize / 64) * 64; for (size_t d = 0; d < max_prefetch_size; d += 64) _mm_prefetch((const char *) vec + d, _MM_HINT_T0); } // NOTE :: good efficiency when total_vec_size is integral multiple of 64 inline void prefetch_vector_l2(const char *vec, size_t vecsize) { size_t max_prefetch_size = (vecsize / 64) * 64; for (size_t d = 0; d < max_prefetch_size; d += 64) _mm_prefetch((const char *) vec + d, _MM_HINT_T1); } }; // namespace diskann struct PivotContainer { PivotContainer() = default; PivotContainer(size_t pivo_id, float pivo_dist) : piv_id{pivo_id}, piv_dist{pivo_dist} { } bool operator<(const PivotContainer &p) const { return p.piv_dist < piv_dist; } bool operator>(const PivotContainer &p) const { return p.piv_dist > piv_dist; } size_t piv_id; float piv_dist; }; inline bool file_exists(const std::string &name) { struct stat buffer; auto val = stat(name.c_str(), &buffer); // std::cout << " Stat(" << name.c_str() << ") returned: " << val << // std::endl; return (val == 
0); } inline _u64 get_file_size(const std::string &fname) { std::ifstream reader(fname, std::ios::binary | std::ios::ate); if (!reader.fail() && reader.is_open()) { _u64 end_pos = reader.tellg(); // std::cout << " Tellg: " << reader.tellg() << " as u64: " << end_pos // << std::endl; reader.close(); return end_pos; } else { std::cout << "Could not open file: " << fname << std::endl; return 0; } } inline bool validate_file_size(const std::string &name) { std::ifstream in(std::string(name), std::ios::binary); in.seekg(0, in.end); size_t actual_file_size = in.tellg(); in.seekg(0, in.beg); size_t expected_file_size; in.read((char *) &expected_file_size, sizeof(uint64_t)); if (actual_file_size != expected_file_size) { std::cout << "Error loading " << name << ". Expected " "size (metadata): " << expected_file_size << ", actual file size : " << actual_file_size << ". Exiting." << std::endl; in.close(); return false; } in.close(); return true; } #ifdef _WINDOWS #include <Psapi.h> inline void printProcessMemory(const char *message) { PROCESS_MEMORY_COUNTERS counters; HANDLE h = GetCurrentProcess(); GetProcessMemoryInfo(h, &counters, sizeof(counters)); std::cout << message << " [Peaking Working Set size: " << counters.PeakWorkingSetSize * 1.0 / (1024 * 1024 * 1024) << "GB Working set size: " << counters.WorkingSetSize * 1.0 / (1024 * 1024 * 1024) << "GB Private bytes " << counters.PagefileUsage * 1.0 / (1024 * 1024 * 1024) << "GB]" << std::endl; } #else inline void printProcessMemory(const char *message) { } #endif
GB_unaryop__one_int64_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_int64_int64 // op(A') function: GB_tran__one_int64_int64 // C type: int64_t // A type: int64_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_int64_int64 ( int64_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_int64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
core_zgeqrt.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_geqrt
 *
 *  Computes a QR factorization of an m-by-n tile A:
 *  The factorization has the form
 *    \f[
 *        A = Q \times R
 *    \f]
 *  The tile Q is represented as a product of elementary reflectors
 *    \f[
 *        Q = H(1) H(2) ... H(k),
 *    \f]
 *  where \f$ k = min(m,n) \f$.
 *
 *  Each \f$ H(i) \f$ has the form
 *    \f[
 *        H(i) = I - \tau \times v \times v^H
 *    \f]
 *  where \f$ tau \f$ is a scalar, and \f$ v \f$ is a vector with
 *  v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
 *  and \f$ tau \f$ in tau(i).
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the tile A.  m >= 0.
 *
 * @param[in] n
 *          The number of columns of the tile A.  n >= 0.
 *
 * @param[in] ib
 *          The inner-blocking size.  ib >= 0.
 *
 * @param[in,out] A
 *          On entry, the m-by-n tile A.
 *          On exit, the elements on and above the diagonal of the array
 *          contain the min(m,n)-by-n upper trapezoidal tile R (R is
 *          upper triangular if m >= n); the elements below the diagonal,
 *          with the array tau, represent the unitary tile Q as a
 *          product of elementary reflectors (see Further Details).
 *
 * @param[in] lda
 *          The leading dimension of the array A.  lda >= max(1,m).
 *
 * @param[out] T
 *          The ib-by-n triangular factor T of the block reflector.
 *          T is upper triangular by block (economic storage);
 *          The rest of the array is not referenced.
 *
 * @param[in] ldt
 *          The leading dimension of the array T.  ldt >= ib.
 *
 * @param tau
 *          Auxiliary workspace array of length n.
 *
 * @param work
 *          Auxiliary workspace array; must hold at least ib*n elements.
 *          (There is no separate lwork argument -- the caller guarantees
 *          the size.)
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
int core_zgeqrt(int m, int n, int ib,
                plasma_complex64_t *A, int lda,
                plasma_complex64_t *T, int ldt,
                plasma_complex64_t *tau,
                plasma_complex64_t *work)
{
    // Check input arguments.
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -2;
    }
    if ((ib < 0) || ( (ib == 0) && (m > 0) && (n > 0) )) {
        coreblas_error("illegal value of ib");
        return -3;
    }
    if (A == NULL) {
        coreblas_error("NULL A");
        return -4;
    }
    if (lda < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda");
        return -5;
    }
    if (T == NULL) {
        coreblas_error("NULL T");
        return -6;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        coreblas_error("illegal value of ldt");
        return -7;
    }
    if (tau == NULL) {
        coreblas_error("NULL tau");
        return -8;
    }
    if (work == NULL) {
        coreblas_error("NULL work");
        return -9;
    }

    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    // Blocked factorization: for each panel of up to ib columns,
    //   1) zgeqr2: unblocked QR of the panel,
    //   2) zlarft: form the triangular factor T of its block reflector,
    //   3) zlarfb: apply the block reflector (Q^H) to the trailing columns.
    int k = imin(m, n);
    for (int i = 0; i < k; i += ib) {
        int sb = imin(ib, k-i);

        LAPACKE_zgeqr2_work(LAPACK_COL_MAJOR,
                            m-i, sb,
                            &A[lda*i+i], lda,
                            &tau[i], work);

        LAPACKE_zlarft_work(LAPACK_COL_MAJOR,
                            lapack_const(PlasmaForward),
                            lapack_const(PlasmaColumnwise),
                            m-i, sb,
                            &A[lda*i+i], lda,
                            &tau[i],
                            &T[ldt*i], ldt);

        if (n > i+sb) {
            LAPACKE_zlarfb_work(LAPACK_COL_MAJOR,
                                lapack_const(PlasmaLeft),
                                lapack_const(Plasma_ConjTrans),
                                lapack_const(PlasmaForward),
                                lapack_const(PlasmaColumnwise),
                                m-i, n-i-sb, sb,
                                &A[lda*i+i], lda,
                                &T[ldt*i],  ldt,
                                &A[lda*(i+sb)+i], lda,
                                work, n-i-sb);
        }
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper around core_zgeqrt: carves tau (n entries) and work
// (the following ib*n entries) out of the per-thread workspace, so
// work.spaces[tid] must hold at least n + ib*n complex values.
void core_omp_zgeqrt(int m, int n, int ib,
                     plasma_complex64_t *A, int lda,
                     plasma_complex64_t *T, int ldt,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n]) \
                     depend(out:T[0:ib*n])
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);

            // Call the kernel.
            int info = core_zgeqrt(m, n, ib,
                                   A, lda,
                                   T, ldt,
                                   tau,
                                   tau+n);

            if (info != PlasmaSuccess) {
                plasma_error("core_zgeqrt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
hybrid_report_mask.c
/* hybid,id report_mask collects and prints the masks from each rank. 1.) Get mask for each rank (and integer array -- no hex). Root gathers masks and prints them 2.) Get node name from each rank. Determine and use maximum length of node name 3.) Call print_mask. */ #include <stdio.h> #include <mpi.h> #include <omp.h> #include <sched.h> #include <unistd.h> #include <stdlib.h> #include <ctype.h> // basic routes void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask); int hybrid_report_mask(void){ int thrd, nthrds; int rank, nranks; static int multi_node = 0; int ncpus, nel_set; static int ** omp_proc_mask; static int * omp_mask_pac; char *dummy; char proc_name[MPI_MAX_PROCESSOR_NAME]; static char * all_names; int name_len; static int max_name_len; // General int i,j,ierr; int id, rid,tid; int in_mpi, in_omp; // Mask storage int ** proc_mask; static int * all_masks=0; MPI_Initialized(&in_mpi); in_omp = omp_in_parallel(); if(in_mpi != 0 && in_omp == 0){ // Get number of cpus (this gives no. of cpu_ids in /proc/cpuinfo) // Get rank number & no of ranks via MPI ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nranks); // Create a 2-D array for mask // proc_mask[rank][ncpus] -- for simplicity, size is [ncpus][ncpus] // Thinking ahead for hybrid code. // zero out proc_mask[ncpus][ncpus] // I could have made proc_mask a single array (proc_mask[ncpus]); but didn't // This is a hold-over from the openmp version that holds everything for all threads. // For MPI I made a continguous collection array (all_masks). 
proc_mask = malloc(sizeof(int*)*ncpus); for(i=0;i<ncpus;i++) proc_mask[i] = malloc(sizeof(int)*ncpus); for(i=0;i<ncpus;i++) for(j=0;j<ncpus;j++) proc_mask[i][j] =0; all_masks = (int *) malloc(sizeof(int)*ncpus*ncpus); // get map for this processor ierr=boundto(&nel_set,proc_mask[rank]); // Gather information to rank 0 MPI_Gather( proc_mask[rank], ncpus, MPI_INT, all_masks, ncpus, MPI_INT, 0, MPI_COMM_WORLD); // Get a list of nodes from all ranks. MPI_Get_processor_name(proc_name,&name_len); MPI_Allreduce(&name_len, &max_name_len, 1,MPI_INT, MPI_MAX, MPI_COMM_WORLD); all_names = malloc(sizeof(int*)*nranks*(max_name_len+1)); MPI_Gather( proc_name, max_name_len+1 , MPI_CHAR, all_names, max_name_len+1, MPI_CHAR, 0, MPI_COMM_WORLD); // If multiple nodes, make muti_node not equal to 0. if(rank == 0) for(id=0;id<nranks;id++){ if( strcmp(&all_names[id*(max_name_len+1)],&all_names[0]) ) multi_node++; } } // End of Pure MPI part if(in_mpi != 0 && in_omp != 0){ if(all_masks == 0) { printf("ERROR: ***** You must call hybrid_report_mask() in a Pure MPI region first. 
***** \n"); exit(1); } thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN); #pragma omp single { omp_proc_mask = malloc(sizeof(int*)*nthrds); for(i=0;i<nthrds;i++) omp_proc_mask[i] = malloc(sizeof(int)*ncpus ); for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) omp_proc_mask[i][j] =0; } #pragma omp critical ierr = boundto(&nel_set,omp_proc_mask[thrd]); #pragma omp barrier MPI_Comm_size(MPI_COMM_WORLD, &nranks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); #pragma omp master { omp_mask_pac = (int *) malloc(sizeof(int)*nranks*ncpus); // need packing space for mpi send/recv if(rank == 0){ print_mask(1, dummy, multi_node, 0, 0, ncpus, nthrds,nranks, omp_proc_mask[0]); //print header fflush(stdout); for(tid=0;tid<nthrds;tid++){ print_mask(0, &all_names[tid*(max_name_len+1)], multi_node, 0,tid, ncpus, nthrds,nranks, omp_proc_mask[tid]); } fflush(stdout); for(rid=1;rid<nranks;rid++){ // Receive other rank's packed mask arrays MPI_Recv(omp_mask_pac, nthrds*ncpus, MPI_INT, rid, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE); for(tid=0;tid<nthrds;tid++){ print_mask(0, &all_names[rid*(max_name_len+1)], multi_node, rid,tid, ncpus, nthrds,nranks, &omp_mask_pac[tid*ncpus]); } fflush(stdout); } // rank loop } // end root printing else{ //all other ranks // All non-root ranks send to root. for(rid=1;rid<nranks;rid++){ // Pack up the ranks' mask arrays (Uh, should have made one array from beginning!) for( tid=0;tid<nthrds;tid++){ for( id=0; id<ncpus; id++) omp_mask_pac[(tid*ncpus)+id] = omp_proc_mask[tid][id]; } // Send to root MPI_Send(omp_mask_pac, nthrds*ncpus, MPI_INT, 0, 99, MPI_COMM_WORLD); } //all other ranks } // end non-root printing MPI_Barrier(MPI_COMM_WORLD); } // end of Master #pragma omp barrier } // end of OpenMP part }
OpenMP.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <math.h>

#define array_size 2000000

/* Final step of the Pearson correlation: combines the five accumulated
 * sums -- [0]=sum(x), [1]=sum(y), [2]=sum(x*y), [3]=sum(x^2), [4]=sum(y^2)
 * -- into the correlation coefficient. */
double calculate_pearson(double pearson_array[5]);

int main(int argc, char* argv[])
{
    /********************* SERIAL CODE ************************************/
    /* (the original #pragma omp flush on these private scalars was a no-op
       outside a parallel region and has been removed) */
    double start_serial = omp_get_wtime();

    double *x    = malloc(array_size * sizeof(double));
    double *y    = malloc(array_size * sizeof(double));
    double *xy   = malloc(array_size * sizeof(double));
    double *xsqr = malloc(array_size * sizeof(double));
    double *ysqr = malloc(array_size * sizeof(double));
    if (!x || !y || !xy || !xsqr || !ysqr) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }

    /* calculate: xsum, ysum, xysum, xsqr_sum, ysqr_sum */
    double xsum = 0, ysum = 0, xysum = 0, xsqr_sum = 0, ysqr_sum = 0;
    for (int i = 0; i < array_size; i++) {
        x[i]    = sin(i);
        y[i]    = sin(i + 2);
        xy[i]   = x[i] * y[i];
        xsqr[i] = x[i] * x[i];
        ysqr[i] = y[i] * y[i];

        xsum     += x[i];
        ysum     += y[i];
        xysum    += xy[i];
        xsqr_sum += xsqr[i];
        ysqr_sum += ysqr[i];
    }

    free(x); free(y); free(xy); free(xsqr); free(ysqr);

    /* assign (not +=: the array starts uninitialized) the sums for the
       final calculation */
    double sums_array_serial[5] = {xsum, ysum, xysum, xsqr_sum, ysqr_sum};

    double coeff_serial = calculate_pearson(sums_array_serial);

    double end_serial = omp_get_wtime();

    /* print schedule type; getenv returns NULL when OMP_SCHEDULE is unset,
       and printf("%s", NULL) is undefined behavior -- guard it */
    const char* s = getenv("OMP_SCHEDULE");
    printf("Schedule type and chunk size: %s\n", s ? s : "(not set)");

    printf("Serial - Pearson Correlation Coefficient : %f\n", coeff_serial);
    printf("Serial time: %1.2f\n", end_serial - start_serial);

    /******************* PARALLEL CODE ************************************/
    double start_parallel = omp_get_wtime();

    double *p_x    = malloc(array_size * sizeof(double));
    double *p_y    = malloc(array_size * sizeof(double));
    double *p_xy   = malloc(array_size * sizeof(double));
    double *p_xsqr = malloc(array_size * sizeof(double));
    double *p_ysqr = malloc(array_size * sizeof(double));
    if (!p_x || !p_y || !p_xy || !p_xsqr || !p_ysqr) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }

    double p_xsum = 0, p_ysum = 0, p_xysum = 0, p_xsqr_sum = 0, p_ysqr_sum = 0;

    /* same accumulation, parallelized with a reduction; schedule(runtime)
       honors OMP_SCHEDULE */
    #pragma omp parallel for \
        reduction(+:p_xsum, p_ysum, p_xysum, p_xsqr_sum, p_ysqr_sum) \
        schedule(runtime)
    for (int i = 0; i < array_size; i++) {
        p_x[i]    = sin(i);
        p_y[i]    = sin(i + 2);
        p_xy[i]   = p_x[i] * p_y[i];
        p_xsqr[i] = p_x[i] * p_x[i];
        p_ysqr[i] = p_y[i] * p_y[i];

        p_xsum     += p_x[i];
        p_ysum     += p_y[i];
        p_xysum    += p_xy[i];
        p_xsqr_sum += p_xsqr[i];
        p_ysqr_sum += p_ysqr[i];
    }

    free(p_x); free(p_y); free(p_xy); free(p_xsqr); free(p_ysqr);

    /* BUG FIX: the original used += on an UNINITIALIZED local array here,
       which reads indeterminate values (undefined behavior); initialize
       directly instead */
    double sums_array_parallel[5] =
        {p_xsum, p_ysum, p_xysum, p_xsqr_sum, p_ysqr_sum};

    double coeff_parallel = calculate_pearson(sums_array_parallel);

    double end_parallel = omp_get_wtime();

    printf("Parallel - Pearson Correlation Coefficient: %f\n", coeff_parallel);
    printf("Parallel time: %1.2f\n", end_parallel - start_parallel);
    printf("Speed up: %1.2f\n \n",
           (end_serial - start_serial) / (end_parallel - start_parallel));

    return 0;
}

/* Computes the Pearson correlation coefficient from the five sums produced
 * above (see report for the formula). */
double calculate_pearson(double pearson_array[5])
{
    double num;   /* numerator   */
    double deno;  /* denominator */

    num = (pearson_array[2] -
           (pearson_array[0] * pearson_array[1] / array_size));

    deno = (pearson_array[3] -
            (pearson_array[0] * pearson_array[0] / array_size)) *
           (pearson_array[4] -
            (pearson_array[1] * pearson_array[1] / array_size));

    return num / sqrt(deno);
}
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Guards rand() in the path-sampling helpers below.
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

// Reads one path per line from `filename` into a new list (caller frees).
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

// (Removed commented-out get_random_paths_indexes: same as get_random_paths
//  but also recorded the sampled indexes.  Recover from version control.)

// Samples n paths (with replacement) from paths[0..m-1]; the returned array
// is newly allocated but the strings are shared with `paths`.
char **get_random_paths(char **paths, int n, int m)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        random_paths[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

// Returns n new strings with `find` replaced by `replace` in each path.
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **replace_paths = calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

// Loads n images resized to w x h, converted to grayscale, one flattened
// image per matrix row.
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);

        image gray = grayscale_image(im);
        free_image(im);
        im = gray;

        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Loads n color images resized to w x h, one flattened image per matrix row.
matrix load_image_paths(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Loads n images with augmentation: either a deterministic center crop
// (center != 0) or a random rotate/scale/crop, plus random horizontal flip
// and color distortion (hue/saturation/exposure).
matrix
load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop;
        if(center){
            crop = center_crop_image(im, size, size);
        } else {
            crop = random_augment_image(im, angle, aspect, min, max, size, size);
        }
        int flip = rand()%2;
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        free_image(im);
        X.vals[i] = crop.data;
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}

// Parses "<id> <x> <y> <w> <h>" records (normalized coords) from a label
// file; grows the array as needed.  Caller frees; *n receives the count.
box_label *read_boxes(char *filename, int *n)
{
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    float x, y, h, w;
    int id;
    int count = 0;
    int size = 64;
    box_label *boxes = calloc(size, sizeof(box_label));
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        if(count == size) {
            size = size * 2;
            boxes = realloc(boxes, size*sizeof(box_label));
        }
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}

// Fisher-Yates-style shuffle (with replacement of the swap target).
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        box_label swap = b[i];
        int index = rand()%n;
        b[i] = b[index];
        b[index] = swap;
    }
}

// Re-maps box coordinates after crop/scale/flip augmentation; boxes at the
// exact origin (0,0) are treated as padding and pushed out of range.
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);

        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}

// Fills detection truth in (4+classes)-per-box layout (x,y,w,h,one-hot id),
// capped at 90 boxes.
// NOTE(review): the `< .0` size filter only skips NEGATIVE sizes (cf. the
// .005 threshold in fill_truth_region) -- confirm intent.
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 90; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}

// Fills region-grid truth: the image is divided into num_boxes x num_boxes
// cells; each cell stores [objectness, one-hot class, x, y, w, h] with x,y
// relative to the cell.  First box to claim a cell wins.
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if (w < .005 || h < .005) continue;

        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;
        truth[index++] = 1;

        if (id < classes) truth[index+id] = 1;
        index += classes;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}

// Decodes a run-length encoding into a binary image; runs alternate 0/1
// starting at 0, and any remainder is filled with the final value.
void load_rle(image im, int *rle, int n)
{
    int count = 0;
    int curr = 0;
    int i,j;
    for(i = 0; i < n; ++i){
        for(j = 0; j < rle[i]; ++j){
            im.data[count++] = curr;
        }
        curr = 1 - curr;
    }
    for(; count < im.h*im.w*im.c; ++count){
        im.data[count] = curr;
    }
}

// ORs the single-channel mask `src` into channel c of `dest`.
void or_image(image src, image dest, int c)
{
    int i;
    for(i = 0; i < src.w*src.h; ++i){
        if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
    }
}

// Makes per-pixel channel masks mutually exclusive: the first set channel
// wins and later channels are cleared at that pixel.
void exclusive_image(image src)
{
    int k, j, i;
    int s = src.w*src.h;
    for(k = 0; k < src.c-1; ++k){
        for(i = 0; i < s; ++i){
            if (src.data[k*s + i]){
                for(j = k+1; j < src.c; ++j){
                    src.data[j*s + i] = 0;
                }
            }
        }
    }
}

// Tight bounding box (in pixels) of the non-zero region of `im`.
box bound_image(image im)
{
    int x,y;
    int minx = im.w;
    int miny = im.h;
    int maxx = 0;
    int maxy = 0;
    for(y = 0; y < im.h; ++y){
        for(x = 0; x < im.w; ++x){
            if(im.data[y*im.w + x]){
                minx = (x < minx) ? x : minx;
                miny = (y < miny) ? y : miny;
                maxx = (x > maxx) ? x : maxx;
                maxy = (y > maxy) ?
y : maxy;
            }
        }
    }
    box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
    return b;
}

// Builds instance-segmentation truth: for each "<id> <rle>" line in the
// mask file, decodes the RLE, applies the same rotate/crop/flip augmentation
// as the image, and stores per instance [x,y,w,h (normalized), mw*mh mask,
// class id], up to num_boxes instances.
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    image part = make_image(w, h, 1);
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);

        box b = bound_image(sized);
        if(b.w > 0){
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}

/**
 * \brief Collect the ground-truth labels for one image.  For detection each
 *        object's label is its class id plus the box center (x,y) and size
 *        (w,h), all normalized.
 *
 * \param path       path of the image file; the label-file path is derived
 *                   from it by string substitution
 * \param num_boxes  maximum number of boxes kept per image; if the image
 *                   contains more, a random num_boxes of them (after
 *                   shuffling) are used for training
 * \param classes    unused by this function
 * \param flip       whether the image was mirrored horizontally on load
 * \param dx         x offset of the intermediate image within the final
 *                   image, divided by the final width (negated)
 * \param dy         y offset of the intermediate image within the final
 *                   image, divided by the final height (negated)
 * \param sx         ratio of intermediate-image width to final width
 * \param sy         ratio of intermediate-image height to final height
 *
 * Note: the last five parameters exist to correct the box geometry, because
 * the image was scaled / translated / possibly flipped before this call;
 * those augmentations change object positions (boxes) but not class ids.
 * See correct_boxes for exactly how they are applied.
 *
 * \return truth: flat array holding all labels for the image, 5 floats per
 *         box: (x, y, w, h, id).
 */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    // derive the label-file path from the image path
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);

    // read the annotations from the label file
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);               // shuffle the boxes
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);  // undo augmentation geometry
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;
    int sub = 0;

    for (i = 0; i < count; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        // skip degenerate (near-zero size) boxes; `sub` keeps the output packed
        if ((w < .001 || h < .001)) {
            ++sub;
            continue;
        }

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}

#define NUMCHARS 37

// Prints the argmax character of each NUMCHARS-wide slot in `pred`.
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

// Builds captcha truth from the filename: one one-hot NUMCHARS slot per
// character (remaining slots get the blank class).
void fill_truth_captcha(char *path, int n, float *truth)
{
char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; //printf("%s %s %d\n", path, labels[i], i); } } if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path); } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_regression_labels_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i,j; for(i = 0; i < n; ++i){ char labelpath[4096]; find_replace(paths[i], "images", "labels", labelpath); find_replace(labelpath, 
"JPEGImages", "labels", labelpath); find_replace(labelpath, ".BMP", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPeG", ".txt", labelpath); find_replace(labelpath, ".Jpeg", ".txt", labelpath); find_replace(labelpath, ".PNG", ".txt", labelpath); find_replace(labelpath, ".TIF", ".txt", labelpath); find_replace(labelpath, ".bmp", ".txt", labelpath); find_replace(labelpath, ".jpeg", ".txt", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".tif", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); for(j = 0; j < k; ++j){ fscanf(file, "%f", &(y.vals[i][j])); } fclose(file); } return y; } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth(paths[i], labels, k, y.vals[i]); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; //int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "images", "labels", label); find_replace(label, ".jpg", ".txt", label); FILE *file = fopen(label, "r"); if (!file) continue; //++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } //printf("%d/%d\n", count, n); return y; } /** * \brief: 获取对应数据集中的类名, 返回值为一个二维字符数组. 
* 字符串一般保存在一维字符数组中, 因此对于字符串数组, 就是一个二维数组 * * \param: filename 文件路径名 * * \return: char** 类型,包含从文件中读取到的类名 */ char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } image get_segmentation_image(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while(fscanf(file, "%d %s", &id, buff) == 2){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); free(rle); } //exclusive_image(mask); fclose(file); free_image(part); return mask; } image get_segmentation_image2(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); image mask = make_image(w, h, classes+1); int i; for(i = 0; i < w*h; ++i){ mask.data[w*h*classes + i] = 1; } FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; image part = make_image(w, h, 1); while(fscanf(file, "%d %s", &id, buff) == 2){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); or_image(part, mask, id); for(i = 0; i < w*h; ++i){ if(part.data[i]) mask.data[w*h*classes + i] = 0; } free(rle); } //exclusive_image(mask); 
fclose(file); free_image(part); return mask; } data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y.rows = n; d.y.cols = h*w*classes/div/div; d.y.vals = calloc(d.X.rows, sizeof(float*)); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes); //image mask = make_image(orig.w, orig.h, classes+1); image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect); if(flip) flip_image(sized_m); d.y.vals[i] = sized_m.data; free_image(orig); free_image(mask); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, (coords+1)*boxes); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h); image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect); int flip = 
rand()%2; if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; //show_image(sized, "image"); fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14); free_image(orig); /* image rgb = mask_to_rgb(sized_m, classes); show_image(rgb, "part"); show_image(sized, "orig"); cvWaitKey(0); free_image(rgb); */ } free(random_paths); return d; } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = rand()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; 
++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = rand()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*90; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = rand()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = 
((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

/**
 * \brief: Load raw training data for the detector and apply augmentation.
 *
 * Randomly reads n images out of the whole training set, augments them and
 * corrects the label information accordingly.  The resulting images all have
 * size w x h (original training images may have any size), i.e. the size the
 * network processes.
 *
 * Augmentation consists of: interpolated rescaling of width and height (the
 * two axes may use different factors — "scale jitter"); random cropping /
 * translation ("position jitter"); noise in HSV color space ("color jitter");
 * and horizontal mirroring.  There is no rotation jitter.
 *
 * \param: n       number of images read by ONE thread, not the total; e.g.
 *                 reading 128 images with 8 loader threads gives n = 128/8 = 16
 *         paths   collection of all training image paths, one path per row;
 *                 n of them are picked at random
 *         m       number of rows of paths, i.e. total number of training images
 *         w       image width the network can process
 *         h       image height the network can process
 *         boxes   maximum number of boxes processed per training image; if an
 *                 image contains more, a random subset of `boxes` of them
 *                 participates in training (done in fill_truth_detection())
 *         classes total number of classes; not used here (nor, in fact, by
 *                 fill_truth_detection())
 *         jitter  how violent the scale jitter may be: the larger the value,
 *                 the larger the allowed jitter range (rescaling the image in
 *                 width and height with possibly different factors per axis)
 *         hue     maximum hue deviation (HSV space, 0-360 degrees); the actual
 *                 deviation is a random value in -hue..hue
 *         saturation maximum saturation scaling factor (HSV space, 0..1)
 *         exposure   maximum value/brightness scaling factor (HSV space, 0..1)
 *
 * \return: a `data` holding everything one thread read (n images).
 *
 * Note: the last four parameters drive the augmentation — scale jitter,
 *       position jitter (translation) and color jitter (adding a certain
 *       amount of noise to the colors); jitter can be understood as adding
 *       noise to the image.  The last three parameters are used inside
 *       random_distort_image().
 *
 *       As this function shows, darknet puts no constraint on training image
 *       sizes: whatever the size, after the scaling/cropping here every image
 *       ends up at the size used for network training.
 */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    // Pick n random image paths out of paths; mind the memory ownership.
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, 5*boxes);
    // Read each image into the proper slot of d.X.vals and the matching
    // label information into the proper slot of d.y.vals.
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);

        float dw = jitter * orig.w;
        float dh = jitter * orig.h;

        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh)); // new aspect ratio
        float scale = rand_uniform(.25, 2); // scale factor

        float nw, nh;

        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }

        float dx = rand_uniform(0, w - nw); // horizontal placement
        float dy = rand_uniform(0, h - nh); // vertical placement

        // Translate: place the rescaled image onto the canvas.
        place_image(orig, nw, nh, dx, dy, sized);

        // Color jitter in HSV space.
        random_distort_image(sized, hue, saturation, exposure);

        // Horizontal mirroring.
        int flip = rand()%2;
        if(flip) flip_image(sized);
        d.X.vals[i] = sized.data;

        // Load the labels and adjust each bounding box for the augmentation.
        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);

        free_image(orig);
    }
    free(random_paths);
    return d;
}

void *load_thread(void *ptr)
{
    //printf("Loading data: %d\n", rand());
    load_args a = *(struct load_args*)ptr;
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == REGRESSION_DATA){
        *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == INSTANCE_DATA){
        *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == SEGMENTATION_DATA){
        *a.d =
load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = resize_image(*(a.im), a.w, a.h); } else if (a.type == LETTERBOX_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } /* * \brief: 开辟多个线程读入图片数据,读入数据存储至 ptr.d 中(主要调用 * load_in_thread() 函数完成) * * \param: ptr: 包含所有线程要读入图片数据的信息( 如: 读入多少张, 开几个线程读入, * 读入图片最终的宽高, 图片路径等等 ) * * 流程: 本函数首先会获取要读入图片的张数、要开启线程的个数, 而后计算每个线程应该读入的 * 图片张数(尽可能的均匀分配). 
之后创建所有的线程,并行读入数据,最后合并每个线程 * 读入的数据至一个大 data 中,这个 data 的指针变量与 ptr 的指针变量 * 指向的是统一块内存, 因此也就最终将数据读入到 ptr.d 中(因此函数没有返回值) */ void *load_threads(void *ptr) { int i; // args 变量(不是指针变量)是 ptr 指向的内存空间的拷贝; // 但是 ptr 指向内存空间中保存的指针变量指向的空间仍然和 args 变量中的指针变量 // 指向的空间相同, 即共享内存空间(下面代码中的 buffers) load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; // 所有的训练图片数据总数 free(ptr); // 可以看出, 每个线程负责加载一部分数据, 最后再进行汇总 data *buffers = calloc(args.threads, sizeof(data)); pthread_t *threads = calloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; // 均分每个线程的加载数量, 防止不能被整除的情况 args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); free(threads); return 0; } void load_data_blocking(load_args args) { struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; load_thread(ptr); } pthread_t load_data(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = 
load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = calloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = calloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = rand()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_regression_labels_paths(paths, n, k); if(m) free(paths); return d; } data select_data(data *orig, int *inds) { data d = {0}; d.shallow = 1; d.w = orig[0].w; d.h = orig[0].h; d.X.rows = orig[0].X.rows; d.y.rows = orig[0].X.rows; d.X.cols = orig[0].X.cols; d.y.cols = orig[0].y.cols; d.X.vals = calloc(orig[0].X.rows, sizeof(float *)); d.y.vals = calloc(orig[0].y.rows, sizeof(float *)); int i; for(i = 0; i < d.X.rows; 
++i){ d.X.vals[i] = orig[inds[i]].X.vals[i]; d.y.vals[i] = orig[inds[i]].y.vals[i]; } return d; } data *tile_data(data orig, int divs, int size) { data *ds = calloc(divs*divs, sizeof(data)); int i, j; #pragma omp parallel for for(i = 0; i < divs*divs; ++i){ data d; d.shallow = 0; d.w = orig.w/divs * size; d.h = orig.h/divs * size; d.X.rows = orig.X.rows; d.X.cols = d.w*d.h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(j = 0; j < orig.X.rows; ++j){ int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2; int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2; image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]); d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data; } ds[i] = d; } return ds; } data resize_data(data orig, int w, int h) { data d = {0}; d.shallow = 0; d.w = w; d.h = h; int i; d.X.rows = orig.X.rows; d.X.cols = w*h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(i = 0; i < orig.X.rows; ++i){ image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]); d.X.vals[i] = resize_image(im, w, h).data; } return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.w=size; d.h=size; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center); d.y = load_labels_paths(paths, n, labels, k, hierarchy); if(m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = size; d.h = size; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, 
exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = calloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); d.w = d1.w; d.h = d1.h; return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data new = concat_data(d[i], out); free_data(out); out = new; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = rand()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); if(y) 
memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. / d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i+b*10000][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = rand()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ 
scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } data copy_data(data d) { data c = {0}; c.w = d.w; c.h = d.h; c.shallow = 0; c.num_boxes = d.num_boxes; c.boxes = d.boxes; c.X = copy_matrix(d.X); c.y = copy_matrix(d.y); return c; } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = calloc(num, sizeof(float *)); r.y.vals = calloc(num, sizeof(float *)); int i; for(i = 0; i < num; ++i){ int index = rand()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data *split = calloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train; data test; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = calloc(train.X.rows, sizeof(float*)); test.X.vals = calloc(test.X.rows, sizeof(float*)); train.y.vals = calloc(train.y.rows, sizeof(float*)); test.y.vals = calloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = 
end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
omp_parallel_firstprivate.c
<ompts:test> <ompts:testdescription>Test which checks the omp parallel firstprivate directive.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp parallel firstprivate</ompts:directive> <ompts:dependences>omp for omp critical</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" //static int sum1 = 789; int <ompts:testcode:functionname>omp_parallel_firstprivate</ompts:testcode:functionname>(FILE * logFile) { <ompts:orphan:vars> int sum, num_threads,sum1; </ompts:orphan:vars> int known_sum; sum = 0; sum1=7; num_threads = 0; <ompts:orphan> #pragma omp parallel <ompts:check>firstprivate(sum1)</ompts:check> <ompts:crosscheck>private(sum1)</ompts:crosscheck> { printf("sum1=%d\n",sum1); int i; #pragma omp for for (i = 1; i < 1000; i++) { sum1 = sum1 + i; } /*end of for*/ printf("after loop sum1=%d for thread %d\n",sum1, omp_get_thread_num()); #pragma omp critical { sum = sum + sum1; num_threads++; } /*end of critical*/ } /* end of parallel*/ </ompts:orphan> known_sum = (999 * 1000) / 2 + 7 * num_threads; return (known_sum == sum); } </ompts:testcode> </ompts:test>
mash.h
// Gao Wang (c) 2017-2020 wang.gao@columbia.edu
// NOTE(review): guard "_MASH_H" uses a reserved identifier (leading
// underscore + uppercase letter); consider renaming to MASH_H -- TODO confirm.
#ifndef _MASH_H
#define _MASH_H

#include <cmath>
#include <armadillo>
#include <iostream>

#ifdef _OPENMP
# include <omp.h>
#endif

using std::log;
using std::exp;
using std::sqrt;

using arma::uword;
using arma::vec;
using arma::uvec;
using arma::rowvec;
using arma::colvec;
using arma::mat;
using arma::cube;
using arma::datum;
using arma::zeros;
using arma::eye;
using arma::size;
using arma::accu;
using arma::sum;
using arma::max;
using arma::abs;
using arma::sqrt;
using arma::pow;
using arma::exp;
using arma::log;
using arma::trace;
using arma::trans;
using arma::find;
using arma::inv;
using arma::trimatu;
using arma::chol;
using arma::dot;
using arma::intersect;
using arma::find;

// CONSTANTS
// ---------
const double LOG_2PI = log(2.0 * M_PI);
const double INV_SQRT_2PI = 1.0 / sqrt(2.0 * M_PI);
const double LOG_INV_SQRT_2PI = log(INV_SQRT_2PI);

// INLINE FUNCTION DEFINITIONS
// ---------------------------

// Element-wise univariate normal density of x[i] under N(mu[i], sigma2[i]).
// sigma2 is the VARIANCE, not the standard deviation. Returns the
// log-density when logd is true.
inline vec dnorm(const vec & x,
                 const vec & mu,
                 const vec & sigma2,
                 bool logd = false)
{
    vec res = LOG_INV_SQRT_2PI -
              log(sqrt(sigma2)) - pow(x - mu, 2.0) / (2.0 * sigma2);

    if (logd) return res;
    else return exp(res);
}

// Multivariate normal density of each COLUMN of x under N(mean, sigma).
// When inversed is true, `sigma` already holds the precomputed rooti
// (transposed inverse upper Cholesky factor), skipping the decomposition.
// If chol(sigma) throws (sigma not positive definite), every column gets
// density 0 (-inf in log space), except columns numerically equal to the
// mean, which get +inf.
inline vec dmvnorm_mat(const mat & x,
                       const vec & mean,
                       const mat & sigma,
                       bool logd = false,
                       bool inversed = false)
{
    double xdim = static_cast<double>(x.n_rows);

    vec out(x.n_cols);
    mat rooti;

    // we have previously computed rooti
    // in R eg rooti <- backsolve(chol(sigma), diag(ncol(x)))
    if (inversed) { rooti = sigma; }
    else {
        try {
            rooti = trans(inv(trimatu(chol(sigma))));
        } catch (const std::runtime_error & error) {
            if (logd) out.fill(-datum::inf);
            else out.fill(0.0);
            for (uword i = 0; i < x.n_cols; ++i)
                if (accu(abs(x.col(i) - mean)) < 1e-6) out.at(i) = datum::inf;
            return out;
        }
    }
    double rootisum = sum(log(rooti.diag()));
    double constants = -(xdim / 2.0) * LOG_2PI;

    for (unsigned i = 0; i < x.n_cols; i++) {
        vec z = rooti * (x.col(i) - mean);
        out.at(i) = constants - 0.5 * sum(z % z) + rootisum;
    }
    if (logd == false) { out = exp(out); }
    return out;
}

// Single-vector multivariate normal density; same conventions and the same
// chol-failure fallback as dmvnorm_mat above.
inline double dmvnorm(const vec & x,
                      const vec & mean,
                      const mat & sigma,
                      bool logd = false,
                      bool inversed = false)
{
    mat rooti;

    if (inversed) { rooti = sigma; }
    else {
        try {
            rooti = trans(inv(trimatu(chol(sigma))));
        } catch (const std::runtime_error & error) {
            double diff = accu(abs(x - mean));
            if (logd) return (diff < 1e-6) ? datum::inf : -datum::inf;
            else return (diff < 1e-6) ? datum::inf : 0.0;
        }
    }
    double rootisum = sum(log(rooti.diag()));
    double constants = -(static_cast<double>(x.n_elem) / 2.0) * LOG_2PI;
    vec z = rooti * (x - mean);
    double out = constants - 0.5 * sum(z % z) + rootisum;

    if (logd == false) { out = exp(out); }
    return out;
}

// Normal CDF (elementwise), P(X <= x) for X ~ N(m, s) with s a standard
// deviation. lower_tail / logd mirror R's pnorm flags.
template <class T, class U>
inline T pnorm(const U & x, const T & m, const T & s,
               bool logd = false, bool lower_tail = true)
{
    // see `normalCDF` function at:
    // http://en.cppreference.com/w/cpp/numeric/math/erfc
    T res = 0.5 * arma::erfc((x - m) / s * M_SQRT1_2);

    // FIXME: unlike R::pnorm(0,0,0) = 1 and R::pnorm(-1,0,0) = 0, here it generates NaN
    // I manually fix it below.
    // "s == 0" check is not good enough to ensure that res doesn't have NaN due to division by zero
    uvec nan = arma::find_nonfinite(0 / s);

    if (nan.n_elem > 0) {
        res.elem(intersect(find(x >= m), nan)).ones();
        res.elem(intersect(find(x < m), nan)).zeros();
    }
    // NOTE(review): bitwise & on bool operands below works but && would be
    // the conventional spelling.
    if (!lower_tail & !logd) {
        return 1.0 - res;
    } else if (lower_tail & !logd) {
        return res;
    } else if (!lower_tail & logd) {
        return log(1.0 - res);
    } else {  // (lower_tail & logd)
        return log(res);
    }
}

// a quicker way to compute diag(s) %*% V %*% diag(s)
// With non-empty L, returns L * (diag(s) V diag(s)) * L' (common-baseline case).
inline mat get_cov(const vec & s, const mat & V, const mat & L)
{
    if (L.is_empty()) {
        /* return arma::diagmat(s) * V * arma::diagmat(s); */
        return (V.each_col() % s).each_row() % s.t();
    } else {
        mat svs = (V.each_col() % s).each_row() % s.t();
        return L * svs * L.t();
    }
}

// Overload without the L transform: diag(s) * V * diag(s).
inline mat get_cov(const vec & s, const mat & V)
{
    /* return arma::diagmat(s) * V * arma::diagmat(s); */
    return (V.each_col() % s).each_row() % s.t();
}

// @title posterior_cov
// @param Vinv R x R inverse covariance matrix for the likelihood
// @param U R x R prior covariance matrix
// @return R x R posterior covariance matrix
// @description If bhat is N(b,V) and b is N(0,U) then b|bhat N(mu1,U1). This function returns U1.
inline mat get_posterior_cov(const mat & Vinv, const mat & U)
{
    // U %*% solve(Vinv %*% U + diag(nrow(U)))
    mat S = Vinv * U;

    S.diag() += 1.0;
    return U * S.i();
}

// @title posterior_mean
// @param bhat R vector of observations
// @param Vinv R x R inverse covariance matrix for the likelihood
// @param U1 R x R posterior covariance matrix, computed using posterior_cov
// @return R vector of posterior mean
// @description If bhat is N(b,V) and b is N(0,U) then b|bhat N(mu1,U1). This function returns mu1.
// Posterior mean mu1 = U1 * Vinv * bhat for a single effect.
inline vec get_posterior_mean(const vec & bhat, const mat & Vinv,
                              const mat & U1)
{
    return U1 * Vinv * bhat;
}

// Column-wise version of get_posterior_mean for a matrix of observations.
inline mat get_posterior_mean_mat(const mat & bhat, const mat & Vinv,
                                  const mat & U1)
{
    return U1 * Vinv * bhat;
}

// SE CLASS
// --------
// Container for standard errors: the raw matrix `s`, an optional original
// (pre-transform) matrix `s_orig`, and the alpha-scaled matrix `s_alpha`
// used to rescale posterior quantities.
class SE
{
public:
    SE(){ }

    ~SE(){ }

    // Store sbhat; s_alpha defaults to all-ones when sbhat_alpha is empty.
    void set(const mat & sbhat, const mat & sbhat_alpha)
    {
        s = sbhat;
        if (sbhat_alpha.is_empty()) s_alpha.ones(sbhat.n_rows, sbhat.n_cols);
        else s_alpha.ones(sbhat.n_rows, sbhat.n_cols), s_alpha = sbhat_alpha;
    }

    // All-ones fallback. NOTE(review): callers pass (R, J) here although the
    // parameters are named (J, R); the stored matrix ends up R x J, which
    // matches the documented layout -- the parameter NAMES appear swapped.
    void set(int J, int R)
    {
        s.ones(J, R);
        s_alpha.ones(J, R);
    }

    void set_original(const mat & value)
    {
        s_orig = value;
        is_orig_empty = s_orig.is_empty();
    }

    // Returns s_orig when one was provided, otherwise falls back to s.
    // NOTE(review): is_orig_empty is only assigned in set_original(); calling
    // get_original() before set_original() reads an uninitialized bool --
    // TODO confirm all call sites invoke set_original() first.
    const mat & get_original() const
    {
        if (is_orig_empty) return (s);
        else return (s_orig);
    }

    const mat & get() const { return (s_alpha); }

private:
    mat s;        // raw standard errors
    mat s_orig;   // optional original-scale standard errors
    mat s_alpha;  // alpha-scaled standard errors (ones when not supplied)
    bool is_orig_empty;
};

// FUNCTION DECLARATIONS
// ---------------------
int mash_compute_posterior(const mat& b_mat, const SE& s_obj,
                           const mat& v_mat, const mat& l_mat,
                           const mat& a_mat, const cube& U_cube,
                           const cube& Vinv_cube, const cube& U0_cube,
                           mat& post_mean, mat& post_var, mat& neg_prob,
                           mat& zero_prob, cube& post_cov,
                           const mat& posterior_weights,
                           const int& report_type);

int mash_compute_posterior_comcov(const mat& b_mat, const SE & s_obj,
                                  const mat & v_mat, const mat & l_mat,
                                  const mat & a_mat, const cube & U_cube,
                                  const cube & Vinv_cube,
                                  const cube & U0_cube, mat & post_mean,
                                  mat & post_var, mat & neg_prob,
                                  mat & zero_prob, cube & post_cov,
                                  const mat & posterior_weights,
                                  const int & report_type);

int mvsermix_compute_posterior(const mat& b_mat, const mat & s_mat,
                               mat & v_mat, cube & U_cube, cube & Vinv_cube,
                               cube & U0_cube, cube & Uinv_cube,
                               mat & post_mean, mat & post_var,
                               mat & neg_prob, mat & zero_prob,
                               cube & post_cov, vec & prior_scalar,
                               const mat & posterior_weights,
                               const mat & posterior_variable_weights);

int mvsermix_compute_posterior_comcov(const mat& b_mat, const mat & s_mat,
                                      const mat & v_mat, const cube & U_cube,
                                      const cube & Vinv_cube,
                                      const cube & U0_cube,
                                      const cube & Uinv_cube,
                                      mat & post_mean, mat & post_var,
                                      mat & neg_prob, mat & zero_prob,
                                      cube & post_cov, vec & prior_scalar,
                                      const mat & posterior_weights,
                                      const mat & posterior_variable_weights);

// POSTERIORMASH CLASS
// -------------------
// @param b_mat R by J
// @param s_mat R by J
// @param s_orig_mat R by J
// @param s_alpha_mat R by J
// @param v_mat R by R
// @param l_mat R by R for the common baseline application (@Yuxin Zou)
// @param a_mat Q by R for the common baseline application (@Yuxin Zou)
// @param U_cube list of prior covariance matrices, for each mixture component P by R by R
class PosteriorMASH
{
public:
    PosteriorMASH(const mat & b_mat,
                  const mat & s_mat,
                  const mat & s_alpha_mat,
                  const mat & s_orig_mat,
                  const mat & v_mat,
                  const mat & l_mat,
                  const mat & a_mat,
                  const cube & U_cube) :
        b_mat(b_mat), v_mat(v_mat), l_mat(l_mat), a_mat(a_mat), U_cube(U_cube)
    {
        int J = b_mat.n_cols, R = b_mat.n_rows;

        // all-ones SEs when none supplied
        if (s_mat.is_empty()) s_obj.set(R, J);
        else s_obj.set(s_mat, s_alpha_mat);
        s_obj.set_original(s_orig_mat);
        // with a transform a_mat the output dimension becomes Q = a_mat.n_rows
        if (!a_mat.is_empty()) { R = a_mat.n_rows; }
        post_mean.set_size(R, J);
        post_var.set_size(R, J);
        post_cov.set_size(R, R, J);
        neg_prob.set_size(R, J);
        zero_prob.set_size(R, J);
        post_mean.zeros();
        post_var.zeros();
        post_cov.zeros();
        neg_prob.zeros();
        zero_prob.zeros();
#ifdef _OPENMP
        omp_set_num_threads(1);   // default single-threaded; see set_thread()
#endif
    }

    ~PosteriorMASH(){ }

    // @title Compute posterior matrices
    // @description More detailed description of function goes here.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    // @param report_type an integer: 1 for posterior mean only, 2 for posterior second moment, 3 for default mash output, 4 for additionally posterior covariance
    int compute_posterior(const mat & posterior_weights, const int & report_type)
    {
        return mash_compute_posterior(b_mat, s_obj, v_mat, l_mat, a_mat,
                                      U_cube, Vinv_cube, U0_cube, post_mean,
                                      post_var, neg_prob, zero_prob, post_cov,
                                      posterior_weights, report_type);
    }

    // @title Compute posterior matrices when covariance SVS is the same for all J conditions
    // @description More detailed description of function goes here.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    // @param report_type an integer: 1 for posterior mean only, 2 for posterior second moment, 3 for default mash output, 4 for additionally posterior covariance
    int compute_posterior_comcov(const mat & posterior_weights,
                                 const int & report_type)
    {
        return mash_compute_posterior_comcov(b_mat, s_obj, v_mat, l_mat,
                                             a_mat, U_cube, Vinv_cube,
                                             U0_cube, post_mean, post_var,
                                             neg_prob, zero_prob, post_cov,
                                             posterior_weights, report_type);
    } // compute_posterior_comcov

    // initializing some optionally precomputed quantities
    int set_vinv(const cube & value)
    {
        Vinv_cube = value;
        return 0;
    }

    int set_U0(const cube & value)
    {
        U0_cube = value;
        return 0;
    }

    int set_thread(const int & value)
    {
#ifdef _OPENMP
        omp_set_num_threads(value);
#endif
        return 0;
    }

    // @return PosteriorMean JxR matrix of posterior means
    // @return PosteriorSD JxR matrix of posterior (marginal) standard deviations
    // @return NegativeProb JxR matrix of posterior (marginal) probability of being negative
    // @return ZeroProb JxR matrix of posterior (marginal) probability of being zero
    mat PosteriorMean() { return post_mean.t(); }

    mat PosteriorSD() { return sqrt(post_var).t(); }

    const cube & PosteriorCov() const { return post_cov; }

    mat NegativeProb() { return neg_prob.t(); }

    mat ZeroProb() { return zero_prob.t(); }

private:
    // input
    mat b_mat;
    SE s_obj;
    mat v_mat;
    mat l_mat;
    mat a_mat;
    cube U_cube;
    cube Vinv_cube;   // optional precomputed V^{-1} per effect
    cube U0_cube;     // optional precomputed posterior covariances
    // output
    // all R X J mat
    mat post_mean;
    mat post_var;
    mat neg_prob;
    mat zero_prob;
    // R X R X J cube (one R x R slice per effect j)
    cube post_cov;
};

// POSTERIORASH CLASS
// ------------------
// Univariate (single-condition) analogue of PosteriorMASH.
// @param b_vec of J
// @param s_vec of J
// @param s_alpha_vec of J
// @param v double
// @param U_vec of P
class PosteriorASH
{
public:
    PosteriorASH(const vec & b_vec,
                 const vec & s_vec,
                 const vec & s_alpha,
                 double v,
                 const vec & U_vec) :
        b_vec(b_vec), s_vec(s_vec), v(v), U_vec(U_vec)
    {
        int J = b_vec.n_elem;

        if (s_alpha.is_empty()) s_alpha_vec.ones(J);
        else s_alpha_vec = s_alpha;
        post_mean.set_size(J);
        post_var.set_size(J);
        neg_prob.set_size(J);
        zero_prob.set_size(J);
    }

    ~PosteriorASH(){ }

    // @title Compute posterior matrices
    // @description univariate version of PosteriorMASH::compute_posterior(), same logic
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    int compute_posterior(const mat & posterior_weights)
    {
        vec vinv = 1 / (s_vec % s_vec * v);   // per-effect likelihood precision
        unsigned J = b_vec.n_elem;
        unsigned P = U_vec.n_elem;
        vec mean(J, arma::fill::zeros);

        // J X P matrices
        mat mu1_mat(J, P, arma::fill::zeros);
        mat mu2_mat(J, P, arma::fill::zeros);
        mat zero_mat(J, P, arma::fill::zeros);
        mat neg_mat(J, P, arma::fill::zeros);

        for (uword p = 0; p < P; ++p) {
            // scalar versions of get_posterior_cov / get_posterior_mean
            vec U1 = U_vec.at(p) / (vinv * U_vec.at(p) + 1.0);
            mu1_mat.col(p) = U1 % vinv % b_vec % s_alpha_vec;
            U1 = U1 % (s_alpha_vec % s_alpha_vec);
            mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1;
            vec sigma = sqrt(U1);
            neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
            for (uword j = 0; j < J; ++j) {
                if (U1.at(j) == 0) {
                    // zero posterior variance => point mass at zero
                    zero_mat.at(j, p) = 1.0;
                    neg_mat.at(j, p) = 0.0;
                }
            }
        }
        // compute weighted means of posterior arrays
        for (uword j = 0; j < J; ++j) {
            post_mean.at(j) = dot(mu1_mat.row(j), posterior_weights.col(j));
            post_var.at(j) = dot(mu2_mat.row(j), posterior_weights.col(j));
            neg_prob.at(j) = dot(neg_mat.row(j), posterior_weights.col(j));
            zero_prob.at(j) = dot(zero_mat.row(j), posterior_weights.col(j));
        }
        post_var -= pow(post_mean, 2.0);   // second moment -> variance
        return 0;
    } // compute_posterior

    // @return PosteriorMean J vec of posterior means
    // @return PosteriorSD J vec of posterior (marginal) standard deviations
    // @return NegativeProb J vec of posterior (marginal) probability of being negative
    // @return ZeroProb J vec of posterior (marginal) probability of being zero
    const vec & PosteriorMean() const { return post_mean; }

    vec PosteriorSD() { return sqrt(post_var); }

    const vec & PosteriorCov() const { return post_var; }

    const vec & NegativeProb() const { return neg_prob; }

    const vec & ZeroProb() const { return zero_prob; }

private:
    // input of J vecs
    vec b_vec;
    vec s_vec;
    vec s_alpha_vec;
    double v;
    vec U_vec;
    // output of J vecs
    vec post_mean;
    vec post_var;
    vec neg_prob;
    vec zero_prob;
};

// MVSERMIX CLASS
// --------------
// @title Inferences for Multivariate Single Effect Regression with Mixture prior
// @param b_mat R by J
// @param s_mat R by J
// @param v_mat R by R
// @param U_cube list of prior covariance matrices, for each mixture component P by R by R
class MVSERMix
{
public:
    MVSERMix(const mat & b_mat,
             const mat & s_mat,
             const mat & v_mat,
             const cube & U_cube) :
        b_mat(b_mat), s_mat(s_mat), v_mat(v_mat), U_cube(U_cube)
    {
        int J = b_mat.n_cols, R = b_mat.n_rows;

        post_mean.set_size(R, J);
        post_var.set_size(R, J);
        post_cov.set_size(R, R, J);
        neg_prob.set_size(R, J);
        zero_prob.set_size(R, J);
        post_mean.zeros();
        post_var.zeros();
        post_cov.zeros();
        neg_prob.zeros();
        zero_prob.zeros();
        prior_scalar.set_size(U_cube.n_slices);
#ifdef _OPENMP
        omp_set_num_threads(1);   // default single-threaded; see set_thread()
#endif
    }

    ~MVSERMix(){ }

    // @title Compute posterior matrices and EM updates for prior scalar estimate
    // @description Make posterior inferences, and also perform the EM update for prior scalar, for mvSuSiE model.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect.
    // @param posterior_variable_weights P X J matrix, the posterior inclusion probabilities of each effect in a single-effect model.
    // posterior_variable_weights is only relevant when EM updates for prior scalar is needed.
    int compute_posterior(const mat & posterior_weights,
                          const mat & posterior_variable_weights)
    {
        return mvsermix_compute_posterior(b_mat, s_mat, v_mat, U_cube,
                                          Vinv_cube, U0_cube, Uinv_cube,
                                          post_mean, post_var, neg_prob,
                                          zero_prob, post_cov, prior_scalar,
                                          posterior_weights,
                                          posterior_variable_weights);
    } // compute_posterior

    // @title Compute posterior matrices when covariance SVS is the same for all J conditions
    // @description More detailed description of function goes here.
    // @param posterior_weights P X J matrix, the posterior probabilities of each mixture component for each effect
    int compute_posterior_comcov(const mat & posterior_weights,
                                 const mat & posterior_variable_weights)
    {
        return mvsermix_compute_posterior_comcov(b_mat, s_mat, v_mat, U_cube,
                                                 Vinv_cube, U0_cube,
                                                 Uinv_cube, post_mean,
                                                 post_var, neg_prob,
                                                 zero_prob, post_cov,
                                                 prior_scalar,
                                                 posterior_weights,
                                                 posterior_variable_weights);
    } // compute_posterior_comcov

    // initializing some optionally precomputed quantities
    int set_Vinv(const cube & value)
    {
        Vinv_cube = value;
        return 0;
    }

    int set_U0(const cube & value)
    {
        U0_cube = value;
        return 0;
    }

    int set_Uinv(const cube & value)
    {
        Uinv_cube = value;
        return 0;
    }

    int set_thread(const int & value)
    {
#ifdef _OPENMP
        omp_set_num_threads(value);
#endif
        return 0;
    }

    // @return PosteriorMean JxR matrix of posterior means
    // @return PosteriorSD JxR matrix of posterior (marginal) standard deviations
    // @return NegativeProb JxR matrix of posterior (marginal) probability of being negative
    // @return ZeroProb JxR matrix of posterior (marginal) probability of being zero
    mat PosteriorMean() { return post_mean.t(); }

    mat PosteriorSD() { return sqrt(post_var).t(); }

    const cube & PosteriorCov() const { return post_cov; }

    mat NegativeProb() { return neg_prob.t(); }

    mat ZeroProb() { return zero_prob.t(); }

    const vec & PriorScalar() const { return prior_scalar; }

private:
    // input
    mat b_mat;
    mat s_mat;
    mat v_mat;
    cube U_cube;
    cube Vinv_cube;   // optional precomputed V^{-1} per effect
    cube U0_cube;     // optional precomputed posterior covariances
    cube Uinv_cube;   // optional precomputed U^{-1}, used by the EM update
    // output
    // all R X J mat
    mat post_mean;
    mat post_var;
    mat neg_prob;
    mat zero_prob;
    // R X R X J cube (one R x R slice per effect j)
    cube post_cov;
    // P vector of scalars
    vec prior_scalar;
};

// Softmax functions: yi = exp(xi) / sum(exp(xj))
inline vec softmax(const vec & x)
{
    // Calculate exp()
    // Subtract the max - this prevents overflow, which happens for x ~ 1000
    vec y = exp(x - max(x));

    // Renormalise
    y /= sum(y);
    return y;
}

// function for "shrinking" the covariance matrix, to get $\hat U_k$.
// Eigenvalues below 1 are floored to (1 + eps), keeping the matrix
// comfortably positive definite.
inline mat shrink_cov(const mat & V, const double & eps)
{
    vec eigval;
    mat eigvec;

    eig_sym(eigval, eigvec, V);
    for (uword i = 0; i < eigval.n_elem; ++i) {
        eigval(i) = (eigval(i) > 1.0) ? eigval(i) : (1.0 + eps);
    }
    return eigvec * diagmat(eigval) * trans(eigvec);
}

// TEEM CLASS
// ----------
// @title Truncated Eigenvalue Extreme deconvolution
// @description ...
// @param X
// @param w
// @param U
// @param maxiter
// @param tol
// @param verbose
class TEEM
{
public:
    // Internally works on T = U + I; get_U() converts back.
    TEEM(const mat & X_mat,
         const vec & w_vec,
         const cube & U_cube) :
        X_mat(X_mat), w_vec(w_vec)
    {
        T_cube = U_cube;
        for (unsigned j = 0; j < T_cube.n_slices; ++j) {
            T_cube.slice(j) += eye(size(T_cube.slice(j)));
        }
    }

    ~TEEM(){ }

    const vec & get_objective() const { return objective; }

    const vec & get_maxd() const { return maxd; }

    const vec & get_w() const { return w_vec; }

    // Recover U = T - I for each mixture component.
    cube get_U()
    {
        cube U_cube = T_cube;

        for (unsigned j = 0; j < U_cube.n_slices; ++j) {
            U_cube.slice(j) -= eye(size(U_cube.slice(j)));
        }
        return U_cube;
    }

    // EM loop: E-step computes responsibilities, M-step re-estimates each
    // component covariance (with eigenvalue shrinkage) and the weights.
    // Stops when max |w - w_prev| < converge_tol or maxiter is reached.
    // NOTE(review): the `verbose` flag is currently unused in this body.
    int fit(const int & maxiter, const double & converge_tol,
            const double & eigen_tol, const bool & verbose)
    {
        // initialize to store progress
        objective.zeros(maxiter);
        maxd.zeros(maxiter);
        int iter_out = 0;

        // Get the number of samples (n) and the number of mixture components (k)
        unsigned int n = X_mat.n_rows;
        unsigned int k = w_vec.size();

        for (unsigned int iter = 0; iter < (unsigned int) maxiter; ++iter) {
            // store parameters and likelihood in the previous step
            vec w0_vec = w_vec;

            // E-step: calculate posterior probabilities using the current mu and sigmas
            mat logP = zeros<mat>(n, k);   // n by k matrix
            for (unsigned j = 0; j < k; ++j) {
                logP.col(j) = log(w_vec(j)) + dmvnorm_mat(trans(X_mat),
                                                          zeros<vec>(
                        X_mat.n_cols), T_cube.slice(j), true); // ??
            }
            // softmax for renormalization
            mat P_mat = zeros<mat>(k, n); // k by n matrix, because of row/col vec converting
            for (uword i = 0; i < n; ++i) {
                colvec y = arma::conv_to<colvec>::from(logP.row(i));
                P_mat.col(i) = softmax(y);
            }
            P_mat = trans(P_mat);   // n by k matrix
            // M-step:
            for (unsigned int j = 0; j < k; ++j) {
                T_cube.slice(j) = trans(X_mat) *
                                  (P_mat.col(j) % X_mat.each_col()) /
                                  accu(P_mat.col(j));
                T_cube.slice(j) = shrink_cov(T_cube.slice(j), eigen_tol);
            }
            // update mixture weights
            w_vec = arma::conv_to<colvec>::from(sum(P_mat, 0)) / n; // 0:sum by column;

            // Compute log-likelihood at the current estimates
            double f = compute_loglik();

            // Check stopping criterion
            double d = max(abs(w_vec - w0_vec));
            maxd(iter) = d;
            objective(iter) = f;
            iter_out = iter;
            if (d < converge_tol) {
                break;
            }
        }
        // trim the progress vectors to the iterations actually run
        objective.resize(iter_out + 1);
        maxd.resize(iter_out + 1);
        return 0;
    } // fit

private:
    mat X_mat;
    vec w_vec;
    cube T_cube;   // per-component covariance + identity
    vec objective; // log-likelihood trace
    vec maxd;      // per-iteration max weight change

    // Mixture log-likelihood sum_i log sum_j w_j N(x_i; 0, T_j).
    double compute_loglik()
    {
        unsigned int n = X_mat.n_rows;
        unsigned int k = w_vec.size();
        vec y = zeros<vec>(n);

        for (unsigned int j = 0; j < k; ++j) {
            y = y + w_vec(j) * dmvnorm_mat(trans(X_mat),
                                           zeros<vec>(X_mat.n_cols),
                                           T_cube.slice(j));
        }
        return (sum(log(y)));
    }
};

// FUNCTION DEFINITIONS
// --------------------

// @title calc_lik
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior covariances
// @param b_mat R by J
// @param s_mat R by J
// @param v_mat R by R
// @param l_mat R by R for the common baseline application (@Yuxin Zou)
// @param U_cube list of prior covariance matrices
// @param sigma_cube list of sigma which is result of get_cov(s_mat, v_mat, l_mat)
// @param logd if true computes log-likelihood
// @param common_cov if true use version for common covariance
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const mat & b_mat,
             const mat & s_mat,
             const mat & v_mat,
             const mat & l_mat,
             const cube & U_cube,
             const cube & sigma_cube,
             bool logd,
             bool common_cov,
             int n_thread = 1)
{
    // In armadillo data are stored with
    // column-major ordering;
    // slicing columns are therefore faster than rows
    // lik is a J by P matrix
    mat lik(b_mat.n_cols, U_cube.n_slices, arma::fill::zeros);
    vec mean(b_mat.n_rows, arma::fill::zeros);
    mat sigma;

#ifdef _OPENMP
    omp_set_num_threads(n_thread);
#endif
    if (common_cov) {
        // one shared error covariance for all J effects
        if (!sigma_cube.is_empty()) sigma = sigma_cube.slice(0);
        else sigma = get_cov(s_mat.col(0), v_mat, l_mat);
#pragma omp parallel for default(none) schedule(static) shared(lik, U_cube, mean, sigma, logd, b_mat)
        for (uword p = 0; p < lik.n_cols; ++p) {
            lik.col(p) = dmvnorm_mat(b_mat, mean, sigma + U_cube.slice(p), logd);
        }
    } else {
        // per-effect error covariance; sigma is thread-private
#pragma \
    omp parallel for default(none) schedule(static) shared(lik, mean, logd, U_cube, b_mat, sigma_cube, l_mat, v_mat, s_mat) private(sigma)
        for (uword j = 0; j < lik.n_rows; ++j) {
            if (!sigma_cube.is_empty()) sigma = sigma_cube.slice(j);
            else sigma = get_cov(s_mat.col(j), v_mat, l_mat);
            for (uword p = 0; p < lik.n_cols; ++p) {
                lik.at(j, p) = dmvnorm(b_mat.col(j), mean, sigma + U_cube.slice(p), logd);
            }
        }
    }
    return lik;
}

// @title calc_lik multivariate common cov version with sigma inverse precomputed
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior covariances
// @param b_mat R by J
// @param rooti_cube R by R by P, or R by R by J by P, if common_cov is False
// @param logd if true computes log-likelihood
// @param common_cov if true use version for common covariance
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const mat & b_mat,
             const cube & rooti_cube,
             bool logd,
             bool common_cov,
             int n_thread = 1)
{
#ifdef _OPENMP
    omp_set_num_threads(n_thread);
#endif
    // In armadillo data are stored with column-major ordering
    // slicing columns are therefore faster than rows
    // lik is a J by P matrix
    int P;

    if (common_cov) P = rooti_cube.n_slices;
    else P = rooti_cube.n_slices / b_mat.n_cols;   // J*P slices flattened
    mat lik(b_mat.n_cols, P, arma::fill::zeros);
    vec mean(b_mat.n_rows, arma::fill::zeros);

    if (common_cov) {
#pragma omp parallel for default(none) schedule(static) shared(lik, mean, logd, rooti_cube, b_mat)
        for (uword p = 0; p < lik.n_cols; ++p) {
            lik.col(p) = dmvnorm_mat(b_mat, mean, rooti_cube.slice(p), logd, true);
        }
    } else {
        // slice (j * P + p) holds the rooti matrix for effect j, component p
#pragma omp parallel for default(none) schedule(static) shared(lik, mean, logd, rooti_cube, b_mat)
        for (uword j = 0; j < lik.n_rows; ++j) {
            for (uword p = 0; p < lik.n_cols; ++p) {
                lik.at(j, p) = dmvnorm(b_mat.col(j), mean, rooti_cube.slice(j * lik.n_cols + p), logd, true);
            }
        }
    }
    return lik;
}

// @title calc_lik univariate version
// @description computes matrix of likelihoods for each of J cols of Bhat for each of P prior sigma
// @param b_vec of J
// @param s_vec of J
// @param v numeric
// @param U_vec P vector
// @param logd if true computes log-likelihood
// @return J x P matrix of multivariate normal likelihoods, p(bhat | U[p], V)
mat calc_lik(const vec & b_vec,
             const vec & s_vec,
             double v,
             const vec & U_vec,
             bool logd)
{
    mat lik(b_vec.n_elem, U_vec.n_elem, arma::fill::zeros);
    vec sigma = s_vec % s_vec * v;   // per-effect error VARIANCE
    vec mean(b_vec.n_elem, arma::fill::zeros);

    for (uword p = 0; p < lik.n_cols; ++p) {
        lik.col(p) = dnorm(b_vec, mean, sigma + U_vec.at(p), logd);
    }
    return lik;
}

// This implements the core part of the compute_posterior method in
// the PosteriorMASH class.
// Per-effect posterior computation (each effect j has its own V): mixes the
// P component posteriors with posterior_weights into post_mean/var/neg/zero,
// and accumulates post_cov for report_type 2 (second moment) or 4 (covariance).
int mash_compute_posterior(const mat& b_mat, const SE& s_obj,
                           const mat& v_mat, const mat& l_mat,
                           const mat& a_mat, const cube& U_cube,
                           const cube& Vinv_cube, const cube& U0_cube,
                           mat& post_mean, mat& post_var, mat& neg_prob,
                           mat& zero_prob, cube& post_cov,
                           const mat& posterior_weights,
                           const int& report_type)
{
    vec mean(post_mean.n_rows);

    mean.fill(0);
#pragma \
    omp parallel for schedule(static) default(none) shared(posterior_weights, report_type, mean, post_mean, post_var, neg_prob, zero_prob, post_cov, b_mat, s_obj, l_mat, v_mat, a_mat, U_cube, Vinv_cube, U0_cube)
    for (uword j = 0; j < post_mean.n_cols; ++j) {
        // FIXME: improved math may help here
        mat Vinv_j;
        if (Vinv_cube.is_empty()) Vinv_j = inv_sympd(get_cov(s_obj.get_original().col(j), v_mat, l_mat));
        else Vinv_j = Vinv_cube.slice(j);
        // R X P matrices
        mat mu1_mat(post_mean.n_rows, U_cube.n_slices);
        mat diag_mu2_mat(post_mean.n_rows, U_cube.n_slices);
        mat zero_mat(post_mean.n_rows, U_cube.n_slices);
        mat neg_mat(post_mean.n_rows, U_cube.n_slices);
        mu1_mat.fill(0);
        diag_mu2_mat.fill(0);
        zero_mat.fill(0);
        // NOTE(review): neg_mat is not pre-filled here, but every column is
        // assigned in the p-loop below, so it is fully written before use.
        for (uword p = 0; p < U_cube.n_slices; ++p) {
            //
            mat U1(post_mean.n_rows, post_mean.n_rows);
            mat U0;
            U1.fill(0);
            if (U0_cube.is_empty()) U0 = get_posterior_cov(Vinv_j, U_cube.slice(p));
            else U0 = U0_cube.slice(j * U_cube.n_slices + p);
            if (a_mat.is_empty()) {
                // rescale posterior mean/covariance back by s_alpha
                mu1_mat.col(p) = get_posterior_mean(b_mat.col(j), Vinv_j, U0) % s_obj.get().col(j);
                U1 = (U0.each_col() % s_obj.get().col(j)).each_row() % s_obj.get().col(j).t();
            } else {
                // common-baseline transform: project through a_mat
                mu1_mat.col(p) = a_mat * (get_posterior_mean(b_mat.col(j), Vinv_j, U0) % s_obj.get().col(j));
                U1 = a_mat * (((U0.each_col() % s_obj.get().col(j)).each_row() % s_obj.get().col(j).t()) * a_mat.t());
            }
            if (report_type == 2 || report_type == 4) {
                // accumulate the mixture second moment E[bb^T]
                post_cov.slice(j) += posterior_weights.at(p, j) * (U1 + mu1_mat.col(p) * mu1_mat.col(p).t());
            }
            vec sigma = sqrt(U1.diag()); // U1.diag() is the posterior covariance
            diag_mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1.diag();
            neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
            for (uword r = 0; r < sigma.n_elem; ++r) {
                if (sigma.at(r) == 0) {
                    // zero posterior sd => point mass at zero
                    zero_mat.at(r, p) = 1.0;
                    neg_mat.at(r, p) = 0.0;
                }
            }
        }
        // compute weighted means of posterior arrays
        post_mean.col(j) = mu1_mat * posterior_weights.col(j);
        post_var.col(j) = diag_mu2_mat * posterior_weights.col(j);
        neg_prob.col(j) = neg_mat * posterior_weights.col(j);
        zero_prob.col(j) = zero_mat * posterior_weights.col(j);
        //
        // report_type 4: convert accumulated second moment into a covariance
        if (report_type == 4) post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
    }
    post_var -= pow(post_mean, 2.0);   // second moment -> marginal variance
    return 0;
} // mash_compute_posterior

// This implements the core part of the compute_posterior_comcov method in
// the PosteriorMASH class.
// Common-covariance fast path: V (hence Vinv and each U1) is shared by all J
// effects, so the loop parallelizes over components p instead of effects j.
int mash_compute_posterior_comcov(const mat& b_mat, const SE & s_obj,
                                  const mat & v_mat, const mat & l_mat,
                                  const mat & a_mat, const cube & U_cube,
                                  const cube & Vinv_cube,
                                  const cube & U0_cube, mat & post_mean,
                                  mat & post_var, mat & neg_prob,
                                  mat & zero_prob, cube & post_cov,
                                  const mat & posterior_weights,
                                  const int & report_type)
{
    mat mean(post_mean.n_rows, post_mean.n_cols);

    mean.fill(0);
    // R X R
    mat Vinv;
    if (Vinv_cube.is_empty()) Vinv = inv_sympd(get_cov(s_obj.get_original().col(0), v_mat, l_mat));
    else Vinv = Vinv_cube.slice(0);
    rowvec ones(post_mean.n_cols);
    rowvec zeros(post_mean.n_cols);
    ones.fill(1);
    zeros.fill(0);
#pragma \
    omp parallel for schedule(static) default(none) shared(posterior_weights, report_type, mean, Vinv, ones, zeros, post_mean, post_var, neg_prob, zero_prob, post_cov, b_mat, s_obj, a_mat, U_cube, U0_cube)
    for (uword p = 0; p < U_cube.n_slices; ++p) {
        mat zero_mat(post_mean.n_rows, post_mean.n_cols);
        // R X R
        mat U1(post_mean.n_rows, post_mean.n_rows);
        // R X J
        mat mu1_mat(post_mean.n_rows, post_mean.n_cols);
        mat U0;
        zero_mat.fill(0);
        U1.fill(0);
        mu1_mat.fill(0);
        if (U0_cube.is_empty()) U0 = get_posterior_cov(Vinv, U_cube.slice(p));
        else U0 = U0_cube.slice(p);
        if (a_mat.is_empty()) {
            mu1_mat = get_posterior_mean_mat(b_mat, Vinv, U0) % s_obj.get();
            U1 = (U0.each_col() % s_obj.get().col(0)).each_row() % s_obj.get().col(0).t();
        } else {
            mu1_mat = a_mat * (get_posterior_mean_mat(b_mat, Vinv, U0) % s_obj.get());
            U1 = a_mat * (((U0.each_col() % s_obj.get().col(0)).each_row() % s_obj.get().col(0).t()) * a_mat.t());
        }
        // R X J
        mat diag_mu2_mat = pow(mu1_mat, 2.0);
        diag_mu2_mat.each_col() += U1.diag();
        // R X J
        // FIXME: any better way to init sigma?
        mat sigma(post_mean.n_rows, post_mean.n_cols);
        sigma.fill(0);
        sigma.each_col() += sqrt(U1.diag()); // U1.diag() is the posterior covariance
        mat neg_mat = pnorm(mu1_mat, mean, sigma);
        for (uword r = 0; r < sigma.n_rows; ++r) {
            if (sigma.at(r, 0) == 0) {
                zero_mat.row(r) = ones;
                neg_mat.row(r) = zeros;
            }
        }
        // compute weighted means of posterior arrays;
        // the accumulators are shared across threads, hence the critical section
#pragma omp critical
        {
            post_mean += mu1_mat.each_row() % posterior_weights.row(p);
            post_var += diag_mu2_mat.each_row() % posterior_weights.row(p);
            neg_prob += neg_mat.each_row() % posterior_weights.row(p);
            zero_prob += zero_mat.each_row() % posterior_weights.row(p);
            if (report_type == 2 || report_type == 4) {
                for (uword j = 0; j < post_mean.n_cols; ++j) {
                    post_cov.slice(j) += posterior_weights.at(p, j) * (U1 + mu1_mat.col(j) * mu1_mat.col(j).t());
                }
            }
        }
    }
    post_var -= pow(post_mean, 2.0);   // second moment -> marginal variance
    //
    // report_type 4: convert accumulated second moments into covariances
    if (report_type == 4) {
#pragma omp parallel for schedule(static) default(none) shared(post_cov, post_mean)
        for (uword j = 0; j < post_mean.n_cols; ++j) {
            post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
        }
    }
    return 0;
} // mash_compute_posterior_comcov

// This implements the core part of the compute_posterior method in
// the MVSERMix class.
// Per-effect posterior computation for the mvSuSiE single-effect model;
// optionally (when posterior_variable_weights is non-empty) accumulates the
// quantities needed for the EM update of the prior scalar.
int mvsermix_compute_posterior(const mat& b_mat, const mat & s_mat,
                               mat & v_mat, cube & U_cube, cube & Vinv_cube,
                               cube & U0_cube, cube & Uinv_cube,
                               mat & post_mean, mat & post_var,
                               mat & neg_prob, mat & zero_prob,
                               cube & post_cov, vec & prior_scalar,
                               const mat & posterior_weights,
                               const mat & posterior_variable_weights)
{
    vec mean(post_mean.n_rows);

    mean.fill(0);
    // This is meant to store a length P of 2nd moment matrices,
    // each element is \sum_j posterior_{p,j} * mu2_{p,j}
    cube Eb2_cube;
    bool to_estimate_prior = !posterior_variable_weights.is_empty();
    if (to_estimate_prior) {
        // we will compute the EM update for prior scalar here
        // for use with mmbr package
        Eb2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
        Eb2_cube.zeros();
    }
#pragma \
    omp parallel for schedule(static) default(none) shared(posterior_weights, posterior_variable_weights, to_estimate_prior, mean, Eb2_cube, post_mean, post_var, neg_prob, zero_prob, post_cov, prior_scalar, b_mat, s_mat, v_mat, U_cube, Vinv_cube, U0_cube, Uinv_cube)
    for (uword j = 0; j < post_mean.n_cols; ++j) {
        // FIXME: improved math may help here
        mat Vinv_j;
        if (Vinv_cube.is_empty()) Vinv_j = inv_sympd(get_cov(s_mat.col(j), v_mat));
        else Vinv_j = Vinv_cube.slice(j);
        // R X P matrices
        mat mu1_mat(post_mean.n_rows, U_cube.n_slices);
        mat diag_mu2_mat(post_mean.n_rows, U_cube.n_slices);
        mat zero_mat(post_mean.n_rows, U_cube.n_slices);
        mat neg_mat(post_mean.n_rows, U_cube.n_slices);
        mu1_mat.fill(0);
        diag_mu2_mat.fill(0);
        zero_mat.fill(0);
        neg_mat.fill(0);
        // R X R X P cube
        cube mu2_cube;
        mu2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
        for (uword p = 0; p < U_cube.n_slices; ++p) {
            mat U1;
            if (U0_cube.is_empty()) U1 = get_posterior_cov(Vinv_j, U_cube.slice(p));
            else U1 = U0_cube.slice(j * U_cube.n_slices + p);
            mu1_mat.col(p) = get_posterior_mean(b_mat.col(j), Vinv_j, U1);
            // this is posterior 2nd moment for the j-th variable and the p-th prior
            mu2_cube.slice(p) = U1 + mu1_mat.col(p) * mu1_mat.col(p).t();
            // add to posterior 2nd moment contribution of the p-th component
            post_cov.slice(j) += posterior_weights.at(p, j) * mu2_cube.slice(p);
            vec sigma = sqrt(U1.diag()); // U1.diag() is the posterior covariance
            diag_mu2_mat.col(p) = pow(mu1_mat.col(p), 2.0) + U1.diag();
            neg_mat.col(p) = pnorm(mu1_mat.col(p), mean, sigma);
            for (uword r = 0; r < sigma.n_elem; ++r) {
                if (sigma.at(r) == 0) {
                    // zero posterior sd => point mass at zero
                    zero_mat.at(r, p) = 1.0;
                    neg_mat.at(r, p) = 0.0;
                }
            }
        }
        // compute weighted means of posterior arrays
        post_mean.col(j) = mu1_mat * posterior_weights.col(j);
        post_var.col(j) = diag_mu2_mat * posterior_weights.col(j);
        neg_prob.col(j) = neg_mat * posterior_weights.col(j);
        zero_prob.col(j) = zero_mat * posterior_weights.col(j);
        post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
        if (to_estimate_prior) {
            // Eb2_cube is shared across the parallel j-loop, hence critical
#pragma omp critical
            {
                for (uword p = 0; p < U_cube.n_slices; ++p) {
                    // we will compute some quantity to provide for
                    // EM update for prior scalar in mmbr package
                    // the M-step update is:
                    // \sigma_0^2 = \sum_{p=1}^P p(\gamma_p) \mathrm{tr}(U_p^{-1} E[bb^T \,|\, \gamma_p])/r
                    // where E[bb^T \,|\, \gamma_p] = \sum_j \alpha_{p,j} * mu2_mat_{p,j}
                    Eb2_cube.slice(p) += posterior_variable_weights.at(p, j) * mu2_cube.slice(p);
                }
            }
        }
    }
    post_var -= pow(post_mean, 2.0);   // second moment -> marginal variance
    if (to_estimate_prior) {
        // now compute \mathrm{tr}(U_p^{-1} E[bb^T \,|\, \gamma_p])/r for each p
        // NOTE(review): the division by r does not appear here; presumably it
        // is applied by the caller (mmbr) -- TODO confirm.
        for (uword p = 0; p < U_cube.n_slices; ++p) {
            prior_scalar.at(p) = trace(Uinv_cube.slice(p) * Eb2_cube.slice(p));
        }
    }
    return 0;
} // mvsermix_compute_posterior

// This implements the core part of the compute_posterior_comcov method in
// the MVSERMix class.
// "comcov" = common covariance: every variable shares the same residual
// covariance V (only s_mat.col(0) / Vinv_cube.slice(0) is read), so each
// mixture component p has a single posterior covariance U1 shared by all J
// variables.  This lets the work be parallelized over components p instead
// of variables j, with a critical section for the shared accumulations.
// Parameter shapes and output semantics match mvsermix_compute_posterior();
// post_mean/post_var/neg_prob/zero_prob/post_cov are accumulated with +=,
// so the caller must pass them in zero-initialized — TODO confirm.
// Returns 0 unconditionally.
int mvsermix_compute_posterior_comcov(const mat& b_mat, const mat & s_mat, const mat & v_mat,
                                      const cube & U_cube, const cube & Vinv_cube,
                                      const cube & U0_cube, const cube & Uinv_cube,
                                      mat & post_mean, mat & post_var, mat & neg_prob,
                                      mat & zero_prob, cube & post_cov, vec & prior_scalar,
                                      const mat & posterior_weights,
                                      const mat & posterior_variable_weights)
{
  // Zero matrix: the "mean" argument for the matrix form of pnorm() below.
  mat mean(post_mean.n_rows, post_mean.n_cols);
  mean.fill(0);
  // for Eb2_cube see compute_posterior() for detailed documentations.
  cube Eb2_cube;
  bool to_estimate_prior = !posterior_variable_weights.is_empty();
  if (to_estimate_prior) {
    Eb2_cube.set_size(post_mean.n_rows, post_mean.n_rows, U_cube.n_slices);
    Eb2_cube.zeros();
  }
  // R X R
  // Common covariance: V^{-1} is computed once from the first column/slice.
  mat Vinv;
  if (Vinv_cube.is_empty()) Vinv = inv_sympd(get_cov(s_mat.col(0), v_mat));
  else Vinv = Vinv_cube.slice(0);
  // Row templates used to overwrite whole rows in the sigma==0 case below.
  rowvec ones(post_mean.n_cols);
  rowvec zeros(post_mean.n_cols);
  ones.fill(1);
  zeros.fill(0);
  // Parallelize over components p; each iteration owns Eb2_cube.slice(p) and
  // prior_scalar.at(p), while the += updates of the shared output matrices
  // are serialized in the critical section below.
#pragma \
  omp parallel for schedule(static) default(none) shared(posterior_weights, posterior_variable_weights, to_estimate_prior, mean, Vinv, zeros, ones, Eb2_cube, post_mean, post_var, neg_prob, zero_prob, post_cov, prior_scalar, b_mat, U_cube, U0_cube, Uinv_cube)
  for (uword p = 0; p < U_cube.n_slices; ++p) {
    mat zero_mat(post_mean.n_rows, post_mean.n_cols);
    // R X R
    mat U1;
    // R X J
    mat mu1_mat;
    zero_mat.fill(0);
    if (U0_cube.is_empty()) U1 = get_posterior_cov(Vinv, U_cube.slice(p));
    else U1 = U0_cube.slice(p);
    mu1_mat = get_posterior_mean_mat(b_mat, Vinv, U1);
    // Per-variable second-moment matrices U1 + mu1 mu1^T for this component.
    cube mu2_cube;
    mu2_cube.set_size(post_mean.n_rows, post_mean.n_rows, post_mean.n_cols);
    for (uword j = 0; j < post_mean.n_cols; ++j) {
      mu2_cube.slice(j) = U1 + mu1_mat.col(j) * mu1_mat.col(j).t();
      if (to_estimate_prior) Eb2_cube.slice(p) += posterior_variable_weights.at(p, j) * mu2_cube.slice(j);
    }
    // See compute_posterior(): quantity for the mmbr EM update of the prior
    // scale; the /r factor is presumably applied by the caller — TODO confirm.
    if (to_estimate_prior) prior_scalar.at(p) = trace(Uinv_cube.slice(p) * Eb2_cube.slice(p));
    // R X J
    mat diag_mu2_mat = pow(mu1_mat, 2.0);
    diag_mu2_mat.each_col() += U1.diag();
    // R X J
    // FIXME: any better way to init sigma?
    mat sigma(post_mean.n_rows, post_mean.n_cols);
    sigma.fill(0);
    sigma.each_col() += sqrt(U1.diag());
    // U1.diag() is the posterior covariance
    mat neg_mat = pnorm(mu1_mat, mean, sigma);
    // Checking column 0 only is sufficient: every column of sigma is the
    // same vector sqrt(U1.diag()), so a zero sd applies to the whole row.
    for (uword r = 0; r < sigma.n_rows; ++r) {
      if (sigma.at(r, 0) == 0) {
        zero_mat.row(r) = ones;
        neg_mat.row(r) = zeros;
      }
    }
#pragma omp critical
    {
      // compute weighted means of posterior arrays
      post_mean += mu1_mat.each_row() % posterior_weights.row(p);
      post_var += diag_mu2_mat.each_row() % posterior_weights.row(p);
      neg_prob += neg_mat.each_row() % posterior_weights.row(p);
      zero_prob += zero_mat.each_row() % posterior_weights.row(p);
      for (uword j = 0; j < post_mean.n_cols; ++j) {
        post_cov.slice(j) += posterior_weights.at(p, j) * mu2_cube.slice(j);
      }
    }
  }
  // Second moment minus squared mean gives the marginal posterior variance.
  post_var -= pow(post_mean, 2.0);
  // Convert accumulated 2nd moments into covariances: E[bb^T] - E[b]E[b]^T.
#pragma omp parallel for schedule(static) default(none) shared(post_cov, post_mean)
  for (uword j = 0; j < post_mean.n_cols; ++j) {
    post_cov.slice(j) -= post_mean.col(j) * post_mean.col(j).t();
  }
  return 0;
} // mvsermix_compute_posterior_comcov

#endif // ifndef _MASH_H
zsyr2k.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_syr2k * * Performs one of the symmetric rank 2k operations * * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C, \f] * or * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C, \f] * * where alpha and beta are scalars, * C is an n-by-n symmetric matrix, and A and B are n-by-k matrices * in the first case and k-by-n matrices in the second case. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f] * - PlasmaTrans: * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f] * * @param[in] n * The order of the matrix C. n >= zero. * * @param[in] k * If trans = PlasmaNoTrans, number of columns of the A and B matrices; * if trans = PlasmaTrans, number of rows of the A and B matrices. * * @param[in] alpha * The scalar alpha. * * @param[in] pA * An lda-by-ka matrix. * If trans = PlasmaNoTrans, ka = k; * if trans = PlasmaTrans, ka = n. * * @param[in] lda * The leading dimension of the array A. * If trans = PlasmaNoTrans, lda >= max(1, n); * if trans = PlasmaTrans, lda >= max(1, k). * * @param[in] pB * An ldb-by-kb matrix. * If trans = PlasmaNoTrans, kb = k; * if trans = PlasmaTrans, kb = n. * * @param[in] ldb * The leading dimension of the array B. 
* If trans = PlasmaNoTrans, ldb >= max(1, n); * if trans = PlasmaTrans, ldb >= max(1, k). * * @param[in] beta * The scalar beta. * * @param[in,out] pC * An ldc-by-n matrix. * On exit, the uplo part of the matrix is overwritten * by the uplo part of the updated matrix. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1, n). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_zsyr2k * @sa plasma_csyr2k * @sa plasma_dsyr2k * @sa plasma_ssyr2k * ******************************************************************************/ int plasma_zsyr2k(plasma_enum_t uplo, plasma_enum_t trans, int n, int k, plasma_complex64_t alpha, plasma_complex64_t *pA, int lda, plasma_complex64_t *pB, int ldb, plasma_complex64_t beta, plasma_complex64_t *pC, int ldc) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -1; } if ((trans != PlasmaNoTrans) && (trans != PlasmaTrans)) { plasma_error("illegal value of trans"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (k < 0) { plasma_error("illegal value of k"); return -4; } int am, an; int bm, bn; if (trans == PlasmaNoTrans) { am = n; an = k; bm = n; bn = k; } else { am = k; an = n; bm = k; bn = n; } if (lda < imax(1, am)) { plasma_error("illegal value of lda"); return -7; } if (ldb < imax(1, bm)) { plasma_error("illegal value of ldb"); return -9; } if (ldc < imax(1, n)) { plasma_error("illegal value of ldc"); return -12; } // quick return if (n == 0 || ((alpha == 0.0 || k == 0.0) && beta == 1.0)) return PlasmaSuccess; // Tune parameters. 
if (plasma->tuning) plasma_tune_syr2k(plasma, PlasmaComplexDouble, n, k); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; plasma_desc_t B; plasma_desc_t C; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, am, an, 0, 0, am, an, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, bm, bn, 0, 0, bm, bn, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, n, 0, 0, n, n, &C); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, &sequence, &request); plasma_omp_zge2desc(pB, ldb, B, &sequence, &request); plasma_omp_zge2desc(pC, ldc, C, &sequence, &request); // Call the tile async function. plasma_omp_zsyr2k(uplo, trans, alpha, A, B, beta, C, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_zdesc2ge(C, pC, ldc, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&C); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_syr2k * * Performs rank 2k update. * Non-blocking tile version of plasma_zsyr2k(). * May return before the computation is finished. * Operates on matrices stored by tiles. 
* All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f] * - PlasmaTrans: * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f] * * @param[in] alpha * The scalar alpha. * * @param[in] A * Descriptor of matrix A. * *@param[in] B * Descriptor of matrix B. * * @param[in] beta * The scalar beta. * * @param[in,out] C * Descriptor of matrix C. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zsyr2k * @sa plasma_omp_zsyr2k * @sa plasma_omp_csyr2k * ******************************************************************************/ void plasma_omp_zsyr2k(plasma_enum_t uplo, plasma_enum_t trans, plasma_complex64_t alpha, plasma_desc_t A, plasma_desc_t B, plasma_complex64_t beta, plasma_desc_t C, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. 
plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((trans != PlasmaNoTrans) && (trans != PlasmaTrans)) { plasma_error("illegal value of trans"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid B"); return; } if (plasma_desc_check(C) != PlasmaSuccess) { plasma_error("invalid C"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return int k = trans == PlasmaNoTrans ? A.n : A.m; if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0)) return; // Call the parallel function. plasma_pzsyr2k(uplo, trans, alpha, A, B, beta, C, sequence, request); }
sum_of_vector_elements.c
// Felix F Feliu opemMP reduction clouse #include <stdio.h> #include <stdlib.h> #include <omp.h> int main() { // variable declaration int a[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; int sum, i; //initialize sum sum = 0; //add vector elements using a parallel block implementing reduction clouse #pragma omp parallel for reduction(+:sum) //reduction clouse for (i = 0; i <= 9; i++) { sum += a[i]; } //print sum value printf("Sum of vector a is %d.\n", sum); return 0; }
SceneGraphConverterOCC.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #pragma once #include <unordered_set> #include <osg/Material> #include <osg/Geode> #include <osg/CullFace> #include <osg/Point> #include <osg/Switch> #include <osgText/Text> #include <BRepAdaptor_Curve.hxx> #include <BRep_Tool.hxx> #include <BRepMesh_IncrementalMesh.hxx> #include <GCPnts_AbscissaPoint.hxx> #include <GCPnts_UniformAbscissa.hxx> #include <Geom_Line.hxx> #include <Poly.hxx> #include <TopExp.hxx> #include <TopExp_Explorer.hxx> #include <TopoDS.hxx> #include <TopoDS_Edge.hxx> #include <TopoDS_Shape.hxx> #include <TopoDS_Vertex.hxx> #include <ifcpp/geometry/GeometrySettings.h> #include <ifcpp/geometry/SceneGraphUtils.h> #include <ifcpp/model/BuildingModel.h> #include <ifcpp/model/BasicTypes.h> #include <ifcpp/model/StatusCallback.h> #include <ifcpp/IFC4/include/IfcCurtainWall.h> #include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h> #include <ifcpp/IFC4/include/IfcProject.h> #include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h> #include <ifcpp/IFC4/include/IfcRelAggregates.h> #include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h> #include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h> #include <ifcpp/IFC4/include/IfcSpace.h> #include <ifcpp/IFC4/include/IfcWindow.h> #include "GeometryInputDataOCC.h" class ScenegraphConverterOCC : public StatusCallback { protected: std::map<int, osg::ref_ptr<osg::Switch> > m_map_entity_id_to_switch; // Map: IfcProduct ID -> scenegraph switch std::map<int, osg::ref_ptr<osg::Switch> > m_map_representation_to_switch; // Map: Representation identifier -> scenegraph switch shared_ptr<GeometrySettings> m_geom_settings; double m_recent_progress = 0; osg::ref_ptr<osg::CullFace> m_cull_back_off; osg::ref_ptr<osg::StateSet> m_glass_stateset; //\brief StateSet caching and re-use std::vector<osg::ref_ptr<osg::StateSet> > m_vec_existing_statesets; bool m_enable_stateset_caching = false; #ifdef ENABLE_OPENMP Mutex m_writelock_appearance_cache; #endif public: ScenegraphConverterOCC( 
shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings( geom_settings ) { m_cull_back_off = new osg::CullFace( osg::CullFace::BACK ); m_glass_stateset = new osg::StateSet(); m_glass_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); m_glass_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } virtual ~ScenegraphConverterOCC() {} // after calling convertToOSG, the OSG Switches are in the map returned by this method const std::map<int, osg::ref_ptr<osg::Switch> >& getMapIdSwitch() { return m_map_entity_id_to_switch; } struct RenderOptions { RenderOptions(){} RenderOptions( osg::Vec4f color, double distance_between_points_in_mm = 0.5, bool create_points_along_straight_line = false ) { m_color = color; m_color_set = true; m_distance_between_points_in_mm = distance_between_points_in_mm; m_create_points_along_straight_line = create_points_along_straight_line; } osg::Vec4f m_color; bool m_color_set = false; double m_distance_between_points_in_mm = 0.5; bool m_create_points_along_straight_line = false; }; void clearInputCache() { m_map_entity_id_to_switch.clear(); m_map_representation_to_switch.clear(); m_vec_existing_statesets.clear(); } static void getEdgePoints( const TopoDS_Edge& edge, osg::Vec3Array* vertices, const RenderOptions& render_options ) { Standard_Real first = 0; Standard_Real last = 1; Handle( Geom_Curve ) c = BRep_Tool::Curve( edge, first, last ); bool discretize_points_on_straight_line = render_options.m_create_points_along_straight_line; if( c->DynamicType() == STANDARD_TYPE( Geom_Line ) && !discretize_points_on_straight_line ) { // just straight line const TopoDS_Vertex& v1 = TopExp::FirstVertex( edge ); const TopoDS_Vertex& v2 = TopExp::LastVertex( edge ); gp_Pnt point1 = BRep_Tool::Pnt( v1 ); gp_Pnt point2 = BRep_Tool::Pnt( v2 ); vertices->push_back( osg::Vec3d( point1.X(), point1.Y(), point1.Z() ) ); vertices->push_back( osg::Vec3d( point2.X(), point2.Y(), point2.Z() ) ); } else { double param_range = last - first; 
BRepAdaptor_Curve curve_adaptor(edge); //curve_adaptor.Initialize( edge ); #ifdef _DEBUG const TopoDS_Vertex& v1 = TopExp::FirstVertex( edge ); const TopoDS_Vertex& v2 = TopExp::LastVertex( edge ); gp_Pnt point1 = BRep_Tool::Pnt( v1 ); gp_Pnt point2 = BRep_Tool::Pnt( v2 ); #endif Standard_Real length_of_edge = GCPnts_AbscissaPoint::Length( curve_adaptor ); double distance = render_options.m_distance_between_points_in_mm; double num_points = 40*param_range/(2.0*M_PI); distance = length_of_edge/num_points; GCPnts_UniformAbscissa uniform_abscissa; uniform_abscissa.Initialize( curve_adaptor, distance ); if( uniform_abscissa.IsDone() ) { int nb_points = uniform_abscissa.NbPoints(); for( int i = 0; i < nb_points; ++i ) { Standard_Real parameter = uniform_abscissa.Parameter( i + 1 ); gp_Pnt pnt = curve_adaptor.Value( parameter ); vertices->push_back( osg::Vec3d( pnt.X(), pnt.Y(), pnt.Z() ) ); if( i > 0 && i < nb_points - 1 ) { vertices->push_back( osg::Vec3d( pnt.X(), pnt.Y(), pnt.Z() ) ); } } if( vertices->size()> 0 ) { if( vertices->size()%2 != 0 ) { vertices->push_back( vertices->back() ); } } } } } static void drawShape( const TopoDS_Shape& shape, osg::Geode* parent_geode, const RenderOptions& render_options ) { if( shape.IsNull() ) { return; } osg::ref_ptr<osg::Vec3Array> vertices_lines = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_tri_storage = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> normals_tri_storage = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_quad; osg::ref_ptr<osg::Vec3Array> normals_quad; #ifdef _DEBUG osg::ref_ptr<osg::Vec3Array> vertices_triangle_edges = new osg::Vec3Array(); #endif TopAbs_ShapeEnum shape_type = shape.ShapeType(); if( shape_type == TopAbs_WIRE || shape_type == TopAbs_EDGE || shape_type == TopAbs_VERTEX ) { TopExp_Explorer Ex; for( Ex.Init( shape, TopAbs_EDGE ); 
Ex.More(); Ex.Next() ) { TopoDS_Edge edge = TopoDS::Edge( Ex.Current() ); getEdgePoints( edge, vertices_lines, render_options ); } } else { Standard_Real linear_tolerance = 0.06*0.001; // for [m] Standard_Real angular_tolerance = 0.5; bool is_relative = false; BRepMesh_IncrementalMesh incremental_mesh( shape, linear_tolerance, is_relative, angular_tolerance ); TopExp_Explorer shape_explorer( shape, TopAbs_FACE ); for( ; shape_explorer.More(); shape_explorer.Next() ) { const TopoDS_Face& face = TopoDS::Face( shape_explorer.Current() ); TopLoc_Location L = TopLoc_Location(); const Handle( Poly_Triangulation )& poly_triangulation = BRep_Tool::Triangulation( face, L ); if( poly_triangulation.IsNull() ) { continue; } const gp_Trsf & face_trsf = L.Transformation(); Poly::ComputeNormals( poly_triangulation ); const TColgp_Array1OfPnt& triang_vertices = poly_triangulation->Nodes(); const TShort_Array1OfShortReal& triang_normals = poly_triangulation->Normals(); const Poly_Array1OfTriangle& triangles = poly_triangulation->Triangles(); // Number of nodes in the triangulation int num_vertices = poly_triangulation->Nodes().Length(); if( num_vertices*3 != triang_normals.Length() ) { std::cout << "Different number of normals and vertices\n"; return; } if( !vertices_tri_storage ) { vertices_tri_storage = new osg::Vec3Array(); } size_t offset_vertex_storage = vertices_tri_storage->size(); if( !normals_tri_storage ) { normals_tri_storage = new osg::Vec3Array(); } //size_t offset_normals_storage = normals_tri_storage->size(); // Get each vertex index, checking common vertices between shapes for( int i = 0; i < num_vertices; i++ ) { gp_Pnt triang_point = triang_vertices.Value( i+1 ); gp_Vec normal( triang_normals.Value( i*3 + 1 ), triang_normals.Value( i*3 + 2 ), triang_normals.Value( i*3 + 3 ) ); if( face_trsf.Form() != gp_Identity ) { triang_point.Transform( face_trsf ); normal.Transform( face_trsf ); } double x = std::round( triang_point.X()*10.0 )*0.1; double y = std::round( 
triang_point.Y()*10.0 )*0.1; double z = std::round( triang_point.Z()*10.0 )*0.1; vertices_tri_storage->push_back( osg::Vec3d( x, y, z ) ); normals_tri_storage->push_back( osg::Vec3d( normal.X(), normal.Y(), normal.Z() ) ); } if( !vertices_tri ) { vertices_tri = new osg::Vec3Array(); } if( !normals_tri ) { normals_tri = new osg::Vec3Array(); } int num_stored_vertices = vertices_tri_storage->size(); for( auto it = triangles.begin(); it != triangles.end(); ++it ) { const Poly_Triangle& triang = *it; int idx_tri1, idx_tri2, idx_tri3; triang.Get( idx_tri1, idx_tri2, idx_tri3 ); int idx1 = offset_vertex_storage + idx_tri1 - 1; int idx2 = offset_vertex_storage + idx_tri2 - 1; int idx3 = offset_vertex_storage + idx_tri3 - 1; if( idx1 >= num_stored_vertices || idx2 >= num_stored_vertices || idx3 >= num_stored_vertices ) { std::cout << "idx > num_stored_vertices" << std::endl; continue; } osg::Vec3 v1 = vertices_tri_storage->at( idx1 ); osg::Vec3 v2 = vertices_tri_storage->at( idx2 ); osg::Vec3 v3 = vertices_tri_storage->at( idx3 ); vertices_tri->push_back( v1 ); vertices_tri->push_back( v2 ); vertices_tri->push_back( v3 ); osg::Vec3 n1 = normals_tri_storage->at( idx1 ); osg::Vec3 n2 = normals_tri_storage->at( idx2 ); osg::Vec3 n3 = normals_tri_storage->at( idx3 ); normals_tri->push_back( n1 ); normals_tri->push_back( n2 ); normals_tri->push_back( n3 ); } } } if( vertices_tri->size() > 0 ) { if( vertices_tri->size() == normals_tri->size() ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_tri ); geometry->setNormalArray( normals_tri ); normals_tri->setBinding( osg::Array::BIND_PER_VERTEX ); if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) ); 
parent_geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < vertices_tri->size(); ++i ) { osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i]; osg::Vec3f& normal_vec = normals_tri->at( i ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); colors_normals->setBinding( osg::Array::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); parent_geode->addDrawable( geometry_normals ); #endif } } if( vertices_quad ) { if( vertices_quad->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_quad ); if( normals_quad ) { normals_quad->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setNormalArray( normals_quad ); } if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) ); parent_geode->addDrawable( geometry ); } } if( vertices_lines->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_lines ); if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> 
colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_lines->size() ) ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); parent_geode->addDrawable( geometry ); } #ifdef _DEBUG if( vertices_triangle_edges->size() > 0 && false ) { { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_triangle_edges ); osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->resize( vertices_triangle_edges->size(), osg::Vec4f( 0.6f, 0.7f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_triangle_edges->size() ) ); parent_geode->addDrawable( geometry ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); } } #endif } void applyAppearancesToGroup( const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances, osg::Group* grp ) { for( size_t ii = 0; ii < vec_product_appearances.size(); ++ii ) { const shared_ptr<AppearanceData>& appearance = vec_product_appearances[ii]; if( !appearance ) { continue; } AppearanceData::GeometryTypeEnum geom_type = appearance->m_apply_to_geometry_type; if( geom_type == AppearanceData::GEOM_TYPE_SURFACE || geom_type == AppearanceData::GEOM_TYPE_ANY ) { osg::StateSet* item_stateset = convertToOSGStateSet( appearance ); if( item_stateset != nullptr ) { osg::StateSet* existing_item_stateset = grp->getStateSet(); if( existing_item_stateset ) { if( existing_item_stateset != item_stateset ) { existing_item_stateset->merge( *item_stateset ); } } else { grp->setStateSet( item_stateset ); } } } else if( geom_type == AppearanceData::GEOM_TYPE_CURVE ) { //osg::Vec4f color_lines( appearance->m_color_ambient.m_r, 
appearance->m_color_ambient.m_g, appearance->m_color_ambient.m_b, appearance->m_color_ambient.m_a ); //GeomUtils::setColorToLines( grp, color_lines ); } } } //\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object // caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock void convertProductShapeToOSG( shared_ptr<ProductShapeDataOCC>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches ) { if( product_shape->m_ifc_object_definition.expired() ) { return; } RenderOptions render_options; shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition ); shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { return; } const int product_id = ifc_product->m_entity_id; std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; // create OSG objects std::vector<shared_ptr<RepresentationDataOCC> >& vec_product_representations = product_shape->m_vec_representations; for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation ) { const shared_ptr<RepresentationDataOCC>& product_representation_data = vec_product_representations[ii_representation]; if( product_representation_data->m_ifc_representation.expired() ) { continue; } shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation ); const int representation_id = ifc_representation->m_entity_id; osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch(); #ifdef _DEBUG std::stringstream strs_representation_name; strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation; representation_switch->setName( strs_representation_name.str().c_str() ); #endif const 
std::vector<shared_ptr<ItemShapeDataOCC> >& product_items = product_representation_data->m_vec_item_data; for( size_t i_item = 0; i_item < product_items.size(); ++i_item ) { const shared_ptr<ItemShapeDataOCC>& item_input_data = product_items[i_item]; osg::ref_ptr<osg::Group> item_group = new osg::Group(); if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); } #ifdef _DEBUG std::stringstream strs_item_name; strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item; item_group->setName( strs_item_name.str().c_str() ); #endif // create shape for open shells for( size_t ii_shapes = 0; ii_shapes < item_input_data->getShapes().size(); ++ii_shapes ) { const TopoDS_Shape& item_shape = item_input_data->getShapes()[ii_shapes]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } drawShape( item_shape, geode, render_options ); // disable back face culling for open meshes geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF ); if( geode->getNumDrawables() > 0 ) { item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", open shape " << ii_shapes; geode->setName( strs_item_shape_name.str().c_str() ); #endif } } // create shape for points const std::vector<TopoDS_Vertex>& vertex_points = item_input_data->getVertexPoints(); if( vertex_points.size() > 0 ) { osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t ii_vertex_point = 0; ii_vertex_point < vertex_points.size(); ++ii_vertex_point ) { const TopoDS_Vertex& vertex_input = vertex_points[ii_vertex_point]; if( !vertex_input.IsNull() ) { gp_Pnt point1 = BRep_Tool::Pnt( vertex_input ); vertices->push_back( osg::Vec3d( point1.X(), point1.Y(), point1.Z() ) ); } } if( vertices->size() > 0 ) { 
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) ); geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON ); geode->addDrawable( geometry ); geode->setCullingActive( false ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", vertex_point "; geode->setName( strs_item_shape_name.str().c_str() ); #endif } else { std::cout << __FUNC__ << ": unexpected vertices->size() == 0" << std::endl; } } // create shape for polylines for( size_t ii_shapes = 0; ii_shapes < item_input_data->getPolylines().size(); ++ii_shapes ) { const TopoDS_Wire& polyline_data = item_input_data->getPolylines()[ii_shapes]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); RenderOptions render_options_polyline; if( item_input_data->getAppearances().size() > 0 ) { for( size_t ii_appearances = 0; ii_appearances < item_input_data->getAppearances().size(); ++ii_appearances ) { const shared_ptr<AppearanceData>& appearance = item_input_data->getAppearances()[ii_appearances]; if( !appearance ) { continue; } if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_CURVE || appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY ) { osg::Vec4f color_lines( appearance->m_color_ambient.m_r, appearance->m_color_ambient.m_g, appearance->m_color_ambient.m_b, appearance->m_color_ambient.m_a ); render_options_polyline.m_color = color_lines; render_options_polyline.m_color_set = true; break; } } } drawShape( polyline_data, geode, render_options_polyline ); if( geode->getNumDrawables() > 0 ) { 
item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", polylines " << ii_shapes; geode->setName( strs_item_shape_name.str().c_str() ); #endif } } if( m_geom_settings->isShowTextLiterals() ) { for( size_t ii = 0; ii < item_input_data->getTextItems().size(); ++ii ) { const shared_ptr<TextItemDataOCC>& text_data = item_input_data->getTextItems()[ii]; if( !text_data ) { continue; } gp_Trsf& text_pos = text_data->m_text_position; // TODO: handle rotation std::string text_str; text_str.assign( text_data->m_text.begin(), text_data->m_text.end() ); gp_XYZ pos_translation = text_pos.TranslationPart(); osg::Vec3 pos2( pos_translation.X(), pos_translation.Y(), pos_translation.Z() );// text_pos._41, text_pos._42, text_pos._43 ); osg::ref_ptr<osgText::Text> txt = new osgText::Text(); if( !txt ) { throw OutOfMemoryException( __FUNC__ ); } txt->setFont( "fonts/arial.ttf" ); txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) ); txt->setCharacterSize( 0.1f ); txt->setAutoRotateToScreen( true ); txt->setPosition( pos2 ); txt->setText( text_str.c_str() ); txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->addDrawable( txt ); item_group->addChild( geode ); } } // apply statesets if there are any if( item_input_data->getAppearances().size() > 0 ) { applyAppearancesToGroup( item_input_data->getAppearances(), item_group ); } // If anything has been created, add it to the representation group if( item_group->getNumChildren() > 0 ) { #ifdef _DEBUG if( item_group->getNumParents() > 0 ) { std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl; } #endif representation_switch->addChild( item_group ); } } // apply statesets if there are any if( product_representation_data->m_vec_representation_appearances.size() > 0 ) { applyAppearancesToGroup( 
product_representation_data->m_vec_representation_appearances, representation_switch ); } // If anything has been created, add it to the product group if( representation_switch->getNumChildren() > 0 ) { #ifdef _DEBUG if( representation_switch->getNumParents() > 0 ) { std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl; } #endif // enable transparency for certain objects if( dynamic_pointer_cast<IfcSpace>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); } else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); SceneGraphUtils::setMaterialAlpha( representation_switch, 0.6f, true ); } } map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) ); } // TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs } /*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData. \param[out] parent_group Group to append the geometry. **/ void convertToOSG( std::map<int, shared_ptr<ProductShapeDataOCC> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group ) { progressTextCallback( L"Converting geometry to OpenGL format ..." 
); progressValueCallback( 0, "scenegraph" ); m_map_entity_id_to_switch.clear(); m_map_representation_to_switch.clear(); shared_ptr<ProductShapeDataOCC> ifc_project_data; std::vector<shared_ptr<ProductShapeDataOCC> > vec_products; for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it ) { shared_ptr<ProductShapeDataOCC> shape_data = it->second; if( shape_data ) { vec_products.push_back( shape_data ); } } // create geometry for for each IfcProduct independently, spatial structure will be resolved later std::map<int, osg::ref_ptr<osg::Switch> >* map_entity_id = &m_map_entity_id_to_switch; std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_to_switch; const int num_products = (int)vec_products.size(); #ifdef ENABLE_OPENMP Mutex writelock_map; Mutex writelock_ifc_project; #pragma omp parallel firstprivate(num_products) shared(map_entity_id, map_representations) { // time for one product may vary significantly, so schedule not so many #pragma omp for schedule(dynamic,10) #endif for( int i = 0; i < num_products; ++i ) { shared_ptr<ProductShapeDataOCC>& shape_data = vec_products[i]; weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition; if( ifc_object_def_weak.expired() ) { continue; } shared_ptr<IfcObjectDefinition> ifc_object_def( ifc_object_def_weak ); std::stringstream thread_err; if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) ) { // geometry will be created in method subtractOpenings continue; } else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_ifc_project ); #endif ifc_project_data = shape_data; } shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { continue; } if( !ifc_product->m_Representation ) { continue; } const int product_id = ifc_product->m_entity_id; std::map<int, osg::ref_ptr<osg::Switch> > map_representation_switches; try { 
convertProductShapeToOSG( shape_data, map_representation_switches ); } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { thread_err << e.what(); } catch( Standard_Failure& sf ) { thread_err << sf.GetMessageString(); } catch( std::exception& e ) { thread_err << e.what(); } catch( ... ) { thread_err << "undefined error, product id " << product_id; } if( map_representation_switches.size() > 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; product_switch->setName( strs_product_switch_name.str().c_str() ); for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map ) { osg::ref_ptr<osg::Switch>& repres_switch = it_map->second; product_switch->addChild( repres_switch ); } // apply statesets if there are any const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances(); if( vec_product_appearances.size() > 0 ) { applyAppearancesToGroup( vec_product_appearances, product_switch ); } #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_map ); #endif map_entity_id->insert( std::make_pair( product_id, product_switch ) ); map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() ); } if( thread_err.tellp() > 0 ) { messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } // progress callback double progress = (double)i / (double)num_products; if( progress - m_recent_progress > 0.02 ) { #ifdef ENABLE_OPENMP if( omp_get_thread_num() == 0 ) #endif { // leave 10% of progress to openscenegraph internals progressValueCallback( progress*0.9, "scenegraph" ); m_recent_progress = progress; } } } #ifdef ENABLE_OPENMP } // implicit barrier #endif try { // now resolve spatial structure if( ifc_project_data ) { resolveProjectStructure( 
ifc_project_data, parent_group ); } } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( ... ) { messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } } void addNodes( const std::map<int, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group ) { // check if there are entities that are not in spatial structure if( !target_group ) { target_group = new osg::Switch(); } for( auto it_product_shapes = map_shape_data.begin(); it_product_shapes != map_shape_data.end(); ++it_product_shapes ) { int product_id = it_product_shapes->first; auto it_find = m_map_entity_id_to_switch.find( product_id ); if( it_find != m_map_entity_id_to_switch.end() ) { osg::ref_ptr<osg::Switch>& sw = it_find->second; if( sw ) { target_group->addChild( sw ); } } } } bool inParentList(const std::string entity_guid, osg::Group* group) { if (!group) { return false; } const osg::Group::ParentList& vec_parents = group->getParents(); for (size_t ii = 0; ii < vec_parents.size(); ++ii) { osg::Group* parent = vec_parents[ii]; if (parent) { const std::string parent_name = parent->getName(); if (parent_name.length() >= 22) { std::string parent_name_id = parent_name.substr(22); if (parent_name_id == entity_guid) { return true; } bool in_parent_list = inParentList(entity_guid, parent); if (in_parent_list) { return true; } } } } return false; } bool inParentList( const int entity_id, osg::Group* group ) { if( !group ) { return false; } const osg::Group::ParentList& vec_parents = group->getParents(); for( size_t ii = 0; ii < vec_parents.size(); ++ii ) { osg::Group* parent = vec_parents[ii]; if( parent ) { const std::string parent_name = parent->getName(); if( parent_name.length() > 0 ) { if( parent_name.at( 0 ) == '#' ) { // extract entity id 
std::string parent_name_id = parent_name.substr( 1 ); size_t last_index = parent_name_id.find_first_not_of( "0123456789" ); std::string id_str = parent_name_id.substr( 0, last_index ); const int id = std::stoi( id_str.c_str() ); if( id == entity_id ) { return true; } bool in_parent_list = inParentList( entity_id, parent ); if( in_parent_list ) { return true; } } } } } return false; } void resolveProjectStructure( const shared_ptr<ProductShapeDataOCC>& product_data, osg::ref_ptr<osg::Switch> group ) { if( !product_data ) { return; } if( product_data->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> object_def( product_data->m_ifc_object_definition ); const int entity_id = object_def->m_entity_id; if( SceneGraphUtils::inParentList( entity_id, group ) ) { messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, object_def.get() ); return; } const std::vector<shared_ptr<ProductShapeDataOCC> >& vec_children = product_data->getChildren(); for( size_t ii = 0; ii < vec_children.size(); ++ii ) { const shared_ptr<ProductShapeDataOCC>& child_product_data = vec_children[ii]; if( !child_product_data ) { continue; } osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); resolveProjectStructure( child_product_data, group_subparts ); if( group_subparts->getNumChildren() > 0 ) { if( !child_product_data->m_ifc_object_definition.expired() ) { shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition ); std::stringstream group_subparts_name; group_subparts_name << "#" << child_obj_def->m_entity_id << "="; group_subparts_name << child_obj_def->className(); group_subparts->setName( group_subparts_name.str().c_str() ); } group->addChild( group_subparts ); } } auto it_product_map = m_map_entity_id_to_switch.find( entity_id ); if( it_product_map != m_map_entity_id_to_switch.end() ) { const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second; if( product_switch 
) { group->addChild( product_switch ); } } else { if( group->getNumChildren() == 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); group->addChild( product_switch ); std::stringstream switch_name; switch_name << "#" << entity_id << "=" << object_def->className(); product_switch->setName( switch_name.str().c_str() ); } } } void clearAppearanceCache() { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif m_vec_existing_statesets.clear(); } osg::StateSet* convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence ) { if( !appearence ) { return nullptr; } const float shininess = appearence->m_shininess; const float transparency = appearence->m_transparency; const bool set_transparent = appearence->m_set_transparent; const float color_ambient_r = appearence->m_color_ambient.r(); const float color_ambient_g = appearence->m_color_ambient.g(); const float color_ambient_b = appearence->m_color_ambient.b(); const float color_ambient_a = appearence->m_color_ambient.a(); const float color_diffuse_r = appearence->m_color_diffuse.r(); const float color_diffuse_g = appearence->m_color_diffuse.g(); const float color_diffuse_b = appearence->m_color_diffuse.b(); const float color_diffuse_a = appearence->m_color_diffuse.a(); const float color_specular_r = appearence->m_color_specular.r(); const float color_specular_g = appearence->m_color_specular.g(); const float color_specular_b = appearence->m_color_specular.b(); const float color_specular_a = appearence->m_color_specular.a(); if( m_enable_stateset_caching ) { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif for( size_t i = 0; i<m_vec_existing_statesets.size(); ++i ) { const osg::ref_ptr<osg::StateSet> stateset_existing = m_vec_existing_statesets[i]; if( !stateset_existing.valid() ) { continue; } osg::ref_ptr<osg::Material> mat_existing = (osg::Material*)stateset_existing->getAttribute( osg::StateAttribute::MATERIAL ); if( !mat_existing ) { continue; } 
// compare osg::Vec4f color_ambient_existing = mat_existing->getAmbient( osg::Material::FRONT_AND_BACK ); if( abs( color_ambient_existing.r() - color_ambient_r ) > 0.03 ) break; if( abs( color_ambient_existing.g() - color_ambient_g ) > 0.03 ) break; if( abs( color_ambient_existing.b() - color_ambient_b ) > 0.03 ) break; if( abs( color_ambient_existing.a() - color_ambient_a ) > 0.03 ) break; osg::Vec4f color_diffuse_existing = mat_existing->getDiffuse( osg::Material::FRONT_AND_BACK ); if( abs( color_diffuse_existing.r() - color_diffuse_r ) > 0.03 ) break; if( abs( color_diffuse_existing.g() - color_diffuse_g ) > 0.03 ) break; if( abs( color_diffuse_existing.b() - color_diffuse_b ) > 0.03 ) break; if( abs( color_diffuse_existing.a() - color_diffuse_a ) > 0.03 ) break; osg::Vec4f color_specular_existing = mat_existing->getSpecular( osg::Material::FRONT_AND_BACK ); if( abs( color_specular_existing.r() - color_specular_r ) > 0.03 ) break; if( abs( color_specular_existing.g() - color_specular_g ) > 0.03 ) break; if( abs( color_specular_existing.b() - color_specular_b ) > 0.03 ) break; if( abs( color_specular_existing.a() - color_specular_a ) > 0.03 ) break; float shininess_existing = mat_existing->getShininess( osg::Material::FRONT_AND_BACK ); if( abs( shininess_existing - shininess ) > 0.03 ) break; bool blend_on_existing = stateset_existing->getMode( GL_BLEND ) == osg::StateAttribute::ON; if( blend_on_existing != set_transparent ) break; bool transparent_bin = stateset_existing->getRenderingHint() == osg::StateSet::TRANSPARENT_BIN; if( transparent_bin != set_transparent ) break; // if we get here, appearance is same as existing state set // TODO: block this re-used stateset for merging, or prevent merged statesets from being re-used return stateset_existing; } } osg::Vec4f ambientColor( color_ambient_r, color_ambient_g, color_ambient_b, transparency ); osg::Vec4f diffuseColor( color_diffuse_r, color_diffuse_g, color_diffuse_b, transparency ); osg::Vec4f specularColor( 
color_specular_r, color_specular_g, color_specular_b, transparency ); // TODO: material caching and re-use osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ) { throw OutOfMemoryException(); } mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, diffuseColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, specularColor ); mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); mat->setColorMode( osg::Material::SPECULAR ); osg::StateSet* stateset = new osg::StateSet(); if( !stateset ) { throw OutOfMemoryException(); } stateset->setAttribute( mat, osg::StateAttribute::ON ); if( appearence->m_set_transparent ) { mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency ); stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } if( appearence->m_specular_exponent != 0.f ) { //osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights(); //spec_highlights->setSpecularExponent( spec->m_value ); // todo: add to scenegraph } if( m_enable_stateset_caching ) { m_vec_existing_statesets.push_back( stateset ); } return stateset; } };
data_str_core.h
/**
 * @author : Zhao Chonyyao (cyzhao@zju.edu.cn)
 * @date : 2021-04-30
 * @description: data stream core helper.
 * @version : 1.0
 */
#ifndef DATA_STR_H
#define DATA_STR_H
#include <unordered_map>
#include <Eigen/Dense>
#include <Eigen/Sparse>
// NOTE(review): std::vector and size_t are used below without including
// <vector>/<cstddef> directly — this relies on transitive includes pulled in
// by the Eigen headers; confirm or add the includes in a follow-up.
namespace PhysIKA {
/**
 * data stream core class, store some data related to the optimization problem
 *
 * Accumulator for the scalar value (val_), gradient (gra_) and sparse Hessian
 * (hes_) of an optimization problem with `dof_` entities of `dim_` scalar
 * components each. Method bodies live in "data_str_core.imp" (include is
 * commented out below), so per-method semantics noted here are hedged.
 *
 * @tparam T    scalar type (e.g. float/double)
 * @tparam dim_ number of scalar components per degree-of-freedom entity
 */
template <typename T, size_t dim_>
class dat_str_core
{
public:
    // Row-major sparse matrix used to store the Hessian.
    using SMP_TYPE = Eigen::SparseMatrix<T, Eigen::RowMajor>;
    virtual ~dat_str_core() {}
    // dof: number of degree-of-freedom entities; hes_is_const: presumably
    // indicates the Hessian (sparsity and/or values) stays fixed across
    // iterations, enabling the hes_ref_ fast path — TODO confirm in .imp.
    dat_str_core(const size_t& dof, const bool hes_is_const = true);
    // Presumably resets val_/gra_/hes_ accumulators between evaluations — TODO confirm.
    int set_zero();
    //!!!!!!!WARNING!!!!!!!!!: reserve enough space
    // Reserve per-row nonzero capacity in the sparse Hessian; undersizing
    // nnzs leads to costly reallocation (hence the warning above).
    int hes_reserve(const Eigen::VectorXi& nnzs);
    // Compress the Hessian into standard CSR form (cf. Eigen makeCompressed).
    int hes_compress();
    // Presumably adds `time` to the Hessian diagonal (regularization) — TODO confirm.
    int hes_add_diag(const size_t& time);
    // Build hes_ from the staged triplet list `trips`.
    int setFromTriplets();
    // Accumulate a scalar contribution into the objective value val_.
    int save_val(const T& val);
    // Accumulate a full-length gradient vector into gra_.
    int save_gra(const Eigen::Matrix<T, Eigen::Dynamic, 1>& gra);
    // Accumulate a dim_-sized gradient contribution for entity `pos`.
    int save_gra(const size_t& pos, const Eigen::Matrix<T, dim_, 1>& point_gra);
    // Accumulate a single scalar gradient entry at flat index `pos`.
    int save_gra(const size_t& pos, const T& one_gra);
    /**
     * Accumulate a dim_-sized gradient contribution (any Eigen expression)
     * into the segment of gra_ belonging to entity `pos`.
     * The atomic makes each scalar += safe when this is called concurrently
     * from an OpenMP parallel region.
     */
    template <typename Derived>
    int save_gra(const size_t& pos, const Eigen::MatrixBase<Derived>& point_gra)
    {
        for (size_t d = 0; d < dim_; ++d)
        {
#pragma omp atomic
            gra_(dim_ * pos + d) += point_gra(d);
        }
        return 0;
    }
    // Accumulate a dim_ x dim_ block into the Hessian at block position (m, n).
    int save_hes(const size_t& m, const size_t& n, const Eigen::Matrix<T, dim_, dim_>& loc_hes);
    // Accumulate one scalar Hessian entry at (row, col).
    int save_hes(const size_t& row, const size_t& col, const T& value);
    // Presumably zeroes stored Hessian values while keeping the precomputed
    // sparsity pattern — TODO confirm in .imp.
    int set_hes_zero_after_pre_compute();
    // Accessors for the accumulated value / gradient / Hessian / problem size.
    const T get_val() const;
    const Eigen::Matrix<T, Eigen::Dynamic, 1>& get_gra() const;
    const SMP_TYPE& get_hes() const;
    const size_t get_dof() const;
    //TODO:add Perfect Forwarding
private:
    const size_t dof_;        // number of degree-of-freedom entities
    const size_t whole_dim_;  // total scalar dimension; presumably dof_ * dim_ — TODO confirm
    T val_;                   // accumulated objective value
    Eigen::Matrix<T, Eigen::Dynamic, 1> gra_;  // accumulated gradient
    SMP_TYPE hes_;            // accumulated sparse Hessian
    Eigen::Matrix<T, Eigen::Dynamic, 1> all_one_;  // helper vector; use not visible in this header
    bool has_pre_compute_hes_{ false };  // presumably true once the sparsity pattern is precomputed — TODO confirm
    std::vector<Eigen::Triplet<T>> trips;  // staged triplets consumed by setFromTriplets()
    bool hes_is_const_;  // see constructor note above
    std::unordered_map<size_t, T*> hes_ref_;  // presumably direct pointers into hes_ storage for fast repeated writes — TODO confirm
};
} // namespace PhysIKA
// #include "data_str_core.imp"
#endif
nn04.c
/*
    Description:
        This program implements a Neural Network of one hidden and one output layer
        Abides by Lab 5 Exercise 3 requirements

    Author: Georgios Evangelou (1046900)
    Year: 5
    Parallel Programming in Machine Learning Problems
    Electrical and Computer Engineering Department, University of Patras

    System Specifications:
        CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
        GPU: Nvidia GTX 1050 (dual-fan, overclocked)
        RAM: 8GB (dual-channel, @2666 MHz)

    Version Notes:
        Compiles/Runs/Debugs with: gcc nn04.c -o nn04 -lm -O3 -fopenmp -fopt-info -pg && time ./nn04 && gprof ./nn04
        Inherits all settings of previous version if not stated otherwise
        Performance for 100.000 steps:
            No multithreading ---------------------> 8.9 seconds
            CalculateInnerStates() multithreaded --> 17.1 seconds
            UpdateLayer() multithreaded -----------> 12.1 seconds
            Both multithreaded --------------------> 3.4 seconds

    Review fixes in this revision:
        - srand() was seeded with an UNINITIALIZED variable (undefined behavior);
          it is now seeded with time(NULL).
        - InnerLayerErrors() indexed the next layer's weights with row stride
          curNeurons, but each weight row holds curNeurons + UNITY_NEURON entries
          (the trailing bias weight), so wrong weights were read. Stride fixed.
        - UpdateLayer() read inputs[inputsNum-1], one element PAST THE END of the
          input array (the bias neuron has no stored input). The bias weight is
          now updated with its constant input 1.0 instead.
*/

// ****************************************************************************************************************
#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
#pragma GCC option("arch=native","tune=native","no-zero-upper")         //Adapt to the current system (NOTE: recent GCC spells this "#pragma GCC target"; unknown pragmas are warnings only)
#pragma GCC target("avx")                                               //Enable AVX
// ****************************************************************************************************************

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>

// ****************************************************************************************************************
#define INPUT_SIZE     800   //Number of inputs
#define LAYER1_NEURONS 100   //Number of 1st layer neurons
#define LAYER2_NEURONS 10    //Number of 2nd layer neurons
#define DEBUG          0     //Debugging options
#define LEARNING_RATE  0.1   //Learning rate
#define UNITY_NEURON   1     //The unity (bias) neuron: 1 extra weight per neuron, fed the constant input 1.0

// Weight matrices: row i holds neuron i's input weights, the LAST entry of each
// row being the bias ("unity") weight.
double WL1[LAYER1_NEURONS][INPUT_SIZE+UNITY_NEURON];     //The weights of the 1st layer
double WL2[LAYER2_NEURONS][LAYER1_NEURONS+UNITY_NEURON]; //The weights of the 2nd layer
double DL1[LAYER1_NEURONS];  //The inner states of 1st layer
double DL2[LAYER2_NEURONS];  //The inner states of 2nd layer
double OL1[LAYER1_NEURONS];  //The outer states of 1st layer
double OL2[LAYER2_NEURONS];  //The outer states of 2nd layer
double EL1[LAYER1_NEURONS];  //The errors of the 1st layer
double EL2[LAYER2_NEURONS];  //The errors of the 2nd layer


/**
 * Calculates the derivative of a neuron output.
 * For the logistic sigmoid, d(out)/d(net) = out * (1 - out).
 **/
double NeuronOutputDerivative(double output) {
    return output * (1.0 - output);
}


/**
 * Calculates the inner state (net input) of all neurons <i> of a given layer.
 * InputSize counts the bias slot, i.e. it is (#real inputs + UNITY_NEURON).
 **/
void CalculateInnerStates(double *Inputs, double *InnerStates, double *Weights, int InputSize, int neurons) {
    #pragma omp parallel for schedule(static, 10)
    for (int i=0; i<neurons; i++) {
        // Start from the bias weight (last entry of row i, input fixed at 1.0).
        // (Original wrote "1.0 * UNITY_NEURON ? ... : 0.0", which parses as
        // "(1.0*UNITY_NEURON) ? ... : 0.0" — same meaning, clearer here.)
        InnerStates[i] = UNITY_NEURON ? Weights[i*InputSize + InputSize - UNITY_NEURON] : 0.0;
        for (int j=0; j<InputSize-UNITY_NEURON; j++) {
            InnerStates[i] += Inputs[j] * Weights[i*InputSize + j];
        }
    }
}


/**
 * Calculates the outer state (sigmoid activation) of all neurons <i> of a given layer.
 **/
void CalculateOuterStates(double *InnerStates, double *OuterStates, int neurons) {
    for (int i=0; i<neurons; i++) {
        OuterStates[i] = 1 / (1+exp(-InnerStates[i]));
    }
}


/**
 * Activates a specific layer: net input followed by sigmoid.
 **/
void ActivateLayer(double *Inputs, double *InnerStates, double *Outputs, double *Weights, int inputSize, int neurons) {
    CalculateInnerStates(Inputs, InnerStates, Weights, inputSize, neurons);
    CalculateOuterStates(InnerStates, Outputs, neurons);
}


/**
 * Activates the whole Neural Network (forward pass through both layers).
 **/
void ActivateNeuralNetwork(double *Inputs) {
    ActivateLayer(Inputs, DL1, OL1, &WL1[0][0], INPUT_SIZE+UNITY_NEURON, LAYER1_NEURONS);
    ActivateLayer(OL1,    DL2, OL2, &WL2[0][0], LAYER1_NEURONS+UNITY_NEURON, LAYER2_NEURONS);
}


/**
 * Initializes the weights of a single layer with uniform random values in [0, 1].
 **/
void InitializeLayerWeights(double *Weights, int neurons, int inps) {
    for (int i=0; i<neurons; i++) {
        for (int j=0; j<inps; j++) {
            Weights[i*inps + j] = ((double)rand()) / ((double)RAND_MAX);
        }
    }
}


/**
 * Initializes the weights of the Neural Network.
 **/
void InitializeAllWeights() {
    InitializeLayerWeights(&WL1[0][0], LAYER1_NEURONS, INPUT_SIZE+UNITY_NEURON);
    InitializeLayerWeights(&WL2[0][0], LAYER2_NEURONS, LAYER1_NEURONS+UNITY_NEURON);
}


/**
 * Calculates the output layer's errors (delta rule for sigmoid outputs).
 **/
void OutputLayerErrors(double *outputs, double *expected, double *errors, int neurons) {
    for (int i=0; i<neurons; i++) {
        errors[i] = (expected[i] - outputs[i]) * NeuronOutputDerivative(outputs[i]);
    }
}


/**
 * Calculates a hidden layer's errors by back-propagating the next layer's errors.
 **/
void InnerLayerErrors(double *curOutputs, double *nextWeights, double *curErrors, double *nextErrors, int curNeurons, int nextNeurons) {
    // FIX: each next-layer weight row holds curNeurons + UNITY_NEURON entries
    // (the bias weight is last). The original indexed with stride curNeurons,
    // which skewed every row after the first and read bias weights as if they
    // were regular connections.
    const int nextRowLen = curNeurons + UNITY_NEURON;
    for (int c=0; c<curNeurons; c++) {
        double myError = 0.0;
        for (int n=0; n<nextNeurons; n++) {
            myError += nextWeights[n*nextRowLen + c] * nextErrors[n];
        }
        curErrors[c] = myError * NeuronOutputDerivative(curOutputs[c]);
    }
}


/**
 * Updates a layer's weights using the pre-computed error terms.
 * inputsNum counts the bias slot, i.e. it is (#real inputs + UNITY_NEURON).
 **/
void UpdateLayer(double *weights, double *errors, double *inputs, int neurons, int inputsNum) {
    #pragma omp parallel for schedule(static, 10)
    for (int i=0; i<neurons; i++) {
        const double scaled_error = LEARNING_RATE * errors[i];
        for (int j=0; j<inputsNum-UNITY_NEURON; j++) {
            weights[i*inputsNum + j] += scaled_error * inputs[j];
        }
        // FIX: the bias weight's input is the constant 1.0; the original read
        // inputs[inputsNum-1], one element beyond the end of the input array.
        if (UNITY_NEURON) {
            weights[i*inputsNum + inputsNum - UNITY_NEURON] += scaled_error * 1.0;
        }
    }
}


/**
 * Updates all layers' weights.
 **/
void UpdateLayers(double *inputs) {
    UpdateLayer(&WL1[0][0], &EL1[0], inputs,  LAYER1_NEURONS, INPUT_SIZE+UNITY_NEURON);
    UpdateLayer(&WL2[0][0], &EL2[0], &OL1[0], LAYER2_NEURONS, LAYER1_NEURONS+UNITY_NEURON);
}


/**
 * Performs the Back-Propagation algorithm to calculate the errors.
 **/
void ErrorBackPropagation(double *GoldenOutputs) {
    OutputLayerErrors(&OL2[0], GoldenOutputs, &EL2[0], LAYER2_NEURONS);
    InnerLayerErrors(&OL1[0], &WL2[0][0], &EL1[0], &EL2[0], LAYER1_NEURONS, LAYER2_NEURONS);
}


/**
 * Trains the Neural Network by executing the back-propagation algorithm
 * and re-calculating all weights.
 **/
void TrainNeuralNetwork(double *inputs, double *GoldenOutputs) {
    ErrorBackPropagation(GoldenOutputs);
    UpdateLayers(inputs);
}


/**
 * Acquires the inputs: uniform random values in [-1, 1].
 **/
void AcquireInputs(double *array, int size) {
    for (int i=0; i<size; i++)
        array[i] = -1 + 2 * ((double) rand()) / ((double) RAND_MAX);
}


/**
 * Acquires the correct (golden) outputs: uniform random values in [0, 1].
 **/
void AcquireGoldenOutputs(double *array, int size) {
    for (int i=0; i<size; i++)
        array[i] = ((double) rand()) / ((double) RAND_MAX);
}


/**
 * Mean Square Error.
 * NOTE: this actually returns the Euclidean norm of the error vector
 * (sqrt of the sum of squared differences, no division by outputSize);
 * kept as-is because it is only used for progress reporting.
 **/
double MeanSquareError(double *RealOutputs, double *GoldenOutputs, int outputSize) {
    double error = 0.0;
    for (int i=0; i<outputSize; i++)
        error += (RealOutputs[i]-GoldenOutputs[i]) * (RealOutputs[i]-GoldenOutputs[i]);
    return sqrt(error);
}


/**
 * The main program: build random inputs/targets, then train for max_steps
 * iterations and report the final outputs and error.
 **/
int main() {
    printf("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("This program implements a Neural Network of %d Layers.\n", 2);
    printf("Inputs: %d, Hidden layer neurons: %d, Output layer neurons: %d\n", INPUT_SIZE, LAYER1_NEURONS, LAYER2_NEURONS);
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");

    // FIX: the seed variable was previously declared but never initialized
    // before being handed to srand() — reading it was undefined behavior.
    srand((unsigned int) time(NULL));

    double *DataIn     = (double*) calloc(INPUT_SIZE, sizeof(double));
    double *GoldenData = (double*) calloc(LAYER2_NEURONS, sizeof(double));
    if (!DataIn || !GoldenData) {  // robustness: fail loudly instead of dereferencing NULL
        fprintf(stderr, "Memory allocation failed\n");
        free(DataIn);
        free(GoldenData);
        return 1;
    }

    int steps = 0;
    int max_steps = 100000;
    //printf("Set number of steps: ");
    //scanf("%d", &max_steps);

    InitializeAllWeights();
    AcquireInputs(DataIn, INPUT_SIZE);
    AcquireGoldenOutputs(GoldenData, LAYER2_NEURONS);

    if (DEBUG==6) {
        printf("\nThe input was:\n");
        for (int i=0; i<INPUT_SIZE; i++) printf("%.10lf\n", DataIn[i]);
        printf("\n");
    }

    ActivateNeuralNetwork(DataIn);  //First activation
    do {
        steps++;
        TrainNeuralNetwork(DataIn, GoldenData);
        ActivateNeuralNetwork(DataIn);
        /* if (steps%1000==0) {
            printf("STEP %8d ==> ", steps);
            printf("Mean Square Error: %.20lf\n", MeanSquareError(OL2, GoldenData, LAYER2_NEURONS));
        }*/
    } while (steps<max_steps);

    printf("\nSteps completed: %d", steps);
    printf("\nThe final output compared to the golden output is:\n");
    for (int i=0; i<LAYER2_NEURONS; i++)
        printf(" Golden: %.13lf <o> Real: %.13lf \n", GoldenData[i], OL2[i]);
    printf("\n");
    printf("The final Mean Square Error is: %.15lf\n", MeanSquareError(OL2, GoldenData, LAYER2_NEURONS));

    free(DataIn);
    free(GoldenData);
    return 0;
}
callback.h
#define _BSD_SOURCE #define _DEFAULT_SOURCE #include <stdio.h> #include <inttypes.h> #include <omp.h> #include <ompt.h> #include "ompt-signal.h" // Used to detect architecture #include "../../src/kmp_platform.h" static const char* ompt_thread_type_t_values[] = { NULL, "ompt_thread_initial", "ompt_thread_worker", "ompt_thread_other" }; static const char* ompt_task_status_t_values[] = { NULL, "ompt_task_complete", "ompt_task_yield", "ompt_task_cancel", "ompt_task_others" }; static const char* ompt_cancel_flag_t_values[] = { "ompt_cancel_parallel", "ompt_cancel_sections", "ompt_cancel_do", "ompt_cancel_taskgroup", "ompt_cancel_activated", "ompt_cancel_detected", "ompt_cancel_discarded_task" }; static void format_task_type(int type, char *buffer) { char *progress = buffer; if (type & ompt_task_initial) progress += sprintf(progress, "ompt_task_initial"); if (type & ompt_task_implicit) progress += sprintf(progress, "ompt_task_implicit"); if (type & ompt_task_explicit) progress += sprintf(progress, "ompt_task_explicit"); if (type & ompt_task_target) progress += sprintf(progress, "ompt_task_target"); if (type & ompt_task_undeferred) progress += sprintf(progress, "|ompt_task_undeferred"); if (type & ompt_task_untied) progress += sprintf(progress, "|ompt_task_untied"); if (type & ompt_task_final) progress += sprintf(progress, "|ompt_task_final"); if (type & ompt_task_mergeable) progress += sprintf(progress, "|ompt_task_mergeable"); if (type & ompt_task_merged) progress += sprintf(progress, "|ompt_task_merged"); } static ompt_set_callback_t ompt_set_callback; static ompt_get_task_info_t ompt_get_task_info; static ompt_get_thread_data_t ompt_get_thread_data; static ompt_get_parallel_info_t ompt_get_parallel_info; static ompt_get_unique_id_t ompt_get_unique_id; static ompt_get_num_procs_t ompt_get_num_procs; static ompt_get_num_places_t ompt_get_num_places; static ompt_get_place_proc_ids_t ompt_get_place_proc_ids; static ompt_get_place_num_t ompt_get_place_num; static 
ompt_get_partition_place_nums_t ompt_get_partition_place_nums; static ompt_get_proc_id_t ompt_get_proc_id; static ompt_enumerate_states_t ompt_enumerate_states; static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls; static void print_ids(int level) { int task_type, thread_num; ompt_frame_t *frame; ompt_data_t *task_parallel_data; ompt_data_t *task_data; int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame, &task_parallel_data, &thread_num); char buffer[2048]; format_task_type(task_type, buffer); if (frame) printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, " "task_type=%s=%d, thread_num=%d\n", ompt_get_thread_data()->value, level, exists_task ? task_parallel_data->value : 0, exists_task ? task_data->value : 0, frame->exit_frame, frame->enter_frame, buffer, task_type, thread_num); } #define get_frame_address(level) __builtin_frame_address(level) #define print_frame(level) \ printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \ ompt_get_thread_data()->value, level, get_frame_address(level)) // clang (version 5.0 and above) adds an intermediate function call with debug flag (-g) #if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN) #if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5 #define print_frame_from_outlined_fn(level) print_frame(level+1) #else #define print_frame_from_outlined_fn(level) print_frame(level) #endif #if defined(__clang__) && __clang_major__ >= 5 #warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information." #warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!" #endif #endif // This macro helps to define a label at the current position that can be used // to get the current address in the code. 
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
  {} \
  __asm__("nop"); \
ompt_label_##id:

// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)

// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
  define_ompt_label(id) \
  print_possible_return_addresses(get_ompt_label_address(id))

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts an LD instruction which accounts for another 4 bytes. In contrast to
// X86 this instruction is always there, even for void runtime functions.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#else
#error Unsupported target architecture, cannot determine address offset!
#endif


// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))

// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2 #define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4)) #define print_fuzzy_address_blocks(addr) \ printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \ ompt_get_thread_data()->value, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, addr) static void on_ompt_callback_mutex_acquire( ompt_mutex_kind_t kind, unsigned int hint, unsigned int impl, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_acquired( ompt_mutex_kind_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: 
printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_released( ompt_mutex_kind_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_nest_lock( ompt_scope_endpoint_t endpoint, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", 
ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; } } static void on_ompt_callback_sync_region( ompt_sync_region_kind_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); print_ids(0); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; } 
break; } } static void on_ompt_callback_sync_region_wait( ompt_sync_region_kind_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; } break; } } static void on_ompt_callback_flush( ompt_data_t *thread_data, const void *codeptr_ra) { printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra); } static void 
on_ompt_callback_cancel( ompt_data_t *task_data, int flags, const void *codeptr_ra) { const char* first_flag_value; const char* second_flag_value; if(flags & ompt_cancel_parallel) first_flag_value = ompt_cancel_flag_t_values[0]; else if(flags & ompt_cancel_sections) first_flag_value = ompt_cancel_flag_t_values[1]; else if(flags & ompt_cancel_do) first_flag_value = ompt_cancel_flag_t_values[2]; else if(flags & ompt_cancel_taskgroup) first_flag_value = ompt_cancel_flag_t_values[3]; if(flags & ompt_cancel_activated) second_flag_value = ompt_cancel_flag_t_values[4]; else if(flags & ompt_cancel_detected) second_flag_value = ompt_cancel_flag_t_values[5]; else if(flags & ompt_cancel_discarded_task) second_flag_value = ompt_cancel_flag_t_values[6]; printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra); } static void on_ompt_callback_idle( ompt_scope_endpoint_t endpoint) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_idle_begin:\n", ompt_get_thread_data()->value); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_idle_end:\n", ompt_get_thread_data()->value); break; } } static void on_ompt_callback_implicit_task( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, unsigned int team_size, unsigned int thread_num) { switch(endpoint) { case ompt_scope_begin: if(task_data->ptr) printf("%s\n", "0: task_data initially not null"); task_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", 
thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num); break; } } static void on_ompt_callback_lock_init( ompt_mutex_kind_t kind, unsigned int hint, unsigned int impl, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_lock_destroy( ompt_mutex_kind_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_work( ompt_work_type_t wstype, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, uint64_t count, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, 
task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; case ompt_scope_end: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: 
printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; } } static void on_ompt_callback_master( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; } } static void on_ompt_callback_parallel_begin( ompt_data_t *encountering_task_data, const ompt_frame_t *encountering_task_frame, ompt_data_t* parallel_data, uint32_t requested_team_size, ompt_invoker_t invoker, const void *codeptr_ra) { if(parallel_data->ptr) printf("0: parallel_data initially not null\n"); parallel_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32 ", codeptr_ra=%p, 
invoker=%d\n", ompt_get_thread_data()->value, encountering_task_data->value, encountering_task_frame->exit_frame, encountering_task_frame->enter_frame, parallel_data->value, requested_team_size, codeptr_ra, invoker); } static void on_ompt_callback_parallel_end( ompt_data_t *parallel_data, ompt_data_t *encountering_task_data, ompt_invoker_t invoker, const void *codeptr_ra) { printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, encountering_task_data->value, invoker, codeptr_ra); } static void on_ompt_callback_task_create( ompt_data_t *encountering_task_data, const ompt_frame_t *encountering_task_frame, ompt_data_t* new_task_data, int type, int has_dependences, const void *codeptr_ra) { if(new_task_data->ptr) printf("0: new_task_data initially not null\n"); new_task_data->value = ompt_get_unique_id(); char buffer[2048]; format_task_type(type, buffer); //there is no parallel_begin callback for implicit parallel region //thus it is initialized in initial task if(type & ompt_task_initial) { ompt_data_t *parallel_data; ompt_get_parallel_info(0, &parallel_data, NULL); if(parallel_data->ptr) printf("%s\n", "0: parallel_data initially not null"); parallel_data->value = ompt_get_unique_id(); } printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame : NULL, encountering_task_frame ? encountering_task_frame->enter_frame : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? 
"yes" : "no"); } static void on_ompt_callback_task_schedule( ompt_data_t *first_task_data, ompt_task_status_t prior_task_status, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status); if(prior_task_status == ompt_task_complete) { printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value); } } static void on_ompt_callback_task_dependences( ompt_data_t *task_data, const ompt_task_dependence_t *deps, int ndeps) { printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps); } static void on_ompt_callback_task_dependence( ompt_data_t *first_task_data, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value); } static void on_ompt_callback_thread_begin( ompt_thread_type_t thread_type, ompt_data_t *thread_data) { if(thread_data->ptr) printf("%s\n", "0: thread_data initially not null"); thread_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_type_t_values[thread_type], thread_type, thread_data->value); } static void on_ompt_callback_thread_end( ompt_data_t *thread_data) { printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value); } static int on_ompt_callback_control_tool( uint64_t command, uint64_t modifier, void *arg, const void *codeptr_ra) { ompt_frame_t* omptTaskFrame; ompt_get_task_info(0, NULL, (ompt_data_t**) 
NULL, &omptTaskFrame, NULL, NULL); printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame, omptTaskFrame->enter_frame); return 0; //success } #define register_callback_t(name, type) \ do{ \ type f_##name = &on_##name; \ if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \ ompt_set_never) \ printf("0: Could not register callback '" #name "'\n"); \ }while(0) #define register_callback(name) register_callback_t(name, name##_t) int ompt_initialize( ompt_function_lookup_t lookup, ompt_data_t *tool_data) { ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback"); ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info"); ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data"); ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info"); ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id"); ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs"); ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places"); ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids"); ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num"); ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums"); ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id"); ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states"); ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls"); register_callback(ompt_callback_mutex_acquire); register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t); register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t); 
register_callback(ompt_callback_nest_lock); register_callback(ompt_callback_sync_region); register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t); register_callback(ompt_callback_control_tool); register_callback(ompt_callback_flush); register_callback(ompt_callback_cancel); register_callback(ompt_callback_idle); register_callback(ompt_callback_implicit_task); register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t); register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t); register_callback(ompt_callback_work); register_callback(ompt_callback_master); register_callback(ompt_callback_parallel_begin); register_callback(ompt_callback_parallel_end); register_callback(ompt_callback_task_create); register_callback(ompt_callback_task_schedule); register_callback(ompt_callback_task_dependences); register_callback(ompt_callback_task_dependence); register_callback(ompt_callback_thread_begin); register_callback(ompt_callback_thread_end); printf("0: NULL_POINTER=%p\n", (void*)NULL); return 1; //success } void ompt_finalize(ompt_data_t *tool_data) { printf("0: ompt_event_runtime_shutdown\n"); } ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0}; return &ompt_start_tool_result; }
ten_tusscher_2004_epi_S2_6.c
//Original Ten Tusscher
// Ten Tusscher et al. 2004 human ventricular epicardial cell model, with
// Elnaz's fitted parameter set S2_6 (see the `parameters` array in RHS_cpu).
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_6.h"

// Reports the model's resting potential and ODE count to the caller.
// NOTE(review): the parameter names (cell_model, get_initial_v, get_neq) come
// from the GET_CELL_MODEL_DATA macro in the project header.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
/*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
*/
    // Elnaz's steady-state initial conditions
    // Pre-paced steady state for this parameter set; ordering matches the
    // state-variable layout read back in RHS_cpu (V, m, h, j, xr1, xr2, xs,
    // s, r, d, f, fCa, g, Cai, CaSR, Nai, Ki).
    real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advances every requested cell by num_steps explicit steps of size dt.
// Cells are independent, hence the parallel-for over cells.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        // cells_to_solve == NULL means "solve all cells in order".
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a single cell. NOTE: RHS_cpu returns the NEW state in
// rDY (Rush-Larsen style updates, see rDY_[0] = svolt + dt*(-sItot)), not a
// derivative, so the result is copied straight back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluates one integration step of the TT2004 epicardial model: computes all
// membrane currents from the state in sv, then writes the updated state
// (gates via Rush-Larsen exponential integration, concentrations and voltage
// via explicit Euler) into rDY_.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane potential (mV)
    real sm   = sv[1];    // INa activation
    real sh   = sv[2];    // INa fast inactivation
    real sj   = sv[3];    // INa slow inactivation
    real sxr1 = sv[4];    // IKr activation
    real sxr2 = sv[5];    // IKr inactivation
    real sxs  = sv[6];    // IKs activation
    real ss   = sv[7];    // Ito inactivation
    real sr   = sv[8];    // Ito activation
    real sd   = sv[9];    // ICaL activation
    real sf   = sv[10];   // ICaL voltage inactivation
    real sfca = sv[11];   // ICaL calcium inactivation
    real sg   = sv[12];   // SR release gate
    real Cai  = sv[13];   // intracellular Ca (mM)
    real CaSR = sv[14];   // SR Ca (mM)
    real Nai  = sv[15];   // intracellular Na (mM)
    real Ki   = sv[16];   // intracellular K (mM)

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
///#ifdef EPI
    real Gks=0.245;
///#endif
///#ifdef ENDO
///    real Gks=0.245;
///#endif
///#ifdef MCELL
///    real Gks=0.062;
///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
//#ifdef EPI
    real Gto=0.294;
//#endif
//#ifdef ENDO
//    real Gto=0.073;
//#endif
//#ifdef MCELL
//    real Gto=0.294;
///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set S2_6: overrides the published conductances above.
    // Order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK, knak, knaca,
    // Vmaxup, GpCa, arel, crel, Vleak.
    real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    // Precomputed Rush-Larsen factors for the fixed-time-constant gates.
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    // SR release amplitude uses the fitted arel/crel instead of the
    // published constants (kept in the /// comment above).
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // Analytic buffering: solve the quadratic for free [Ca] in SR and cytosol.
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h and j gates use different rate formulas above/below -40 mV.
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

// Ito gate kinetics are cell-type specific; this file is built for EPI.
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // Rush-Larsen: gate -> INF - (INF - gate) * exp(-dt/tau).
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g may only relax (not re-activate) while depolarized.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    // NOTE: rDY_ carries the NEW state (explicit Euler for V and the
    // concentrations), which solve_model_ode_cpu copies back verbatim.
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
implicit_task_data.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// This test checks that values stored in task_data in a barrier_begin event
// are still present in the corresponding barrier_end event.
// Therefore, callback implementations different from the ones in callback.h are necessary.
// This is a test for an issue reported in
// https://github.com/OpenMPToolsInterface/LLVM-openmp/issues/39
#define _BSD_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>

// Printable names for ompt_thread_t enum values (index 0 is unused).
static const char* ompt_thread_t_values[] = {
  NULL,
  "ompt_thread_initial",
  "ompt_thread_worker",
  "ompt_thread_other"
};

// Runtime entry points looked up in ompt_initialize().
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_thread_data_t ompt_get_thread_data;

int main()
{
  // A parallel region whose implicit barrier at the end triggers the
  // sync_region / sync_region_wait callbacks under test. The master's
  // sleep makes the workers wait in the barrier.
  #pragma omp parallel num_threads(4)
  {
    #pragma omp master
    {
      sleep(1);
    }
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // master thread implicit barrier at parallel end
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra={{0x[0-f]*}}

  // worker thread implicit barrier at parallel end
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id=0, task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=0, task_id=[[TASK_ID]], codeptr_ra=[[NULL]]

  return 0;
}

// Stores a unique id in thread_data at thread start and reports it.
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, ompt_thread_t_values[thread_type],
         thread_type, thread_data->value);
}

// Sets task_data->value at barrier begin and prints it again at barrier end;
// FileCheck verifies the TASK_ID printed at begin and end match.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      // The value stored here must survive until the matching scope_end.
      task_data->value = ompt_get_unique_id();
      if (kind == ompt_sync_region_barrier_implicit)
        printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      if (kind == ompt_sync_region_barrier_implicit)
        // parallel_data may be NULL at scope_end, hence the guard.
        printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
               ompt_get_thread_data()->value,
               (parallel_data)?parallel_data->value:0, task_data->value,
               codeptr_ra);
      break;
  }
}

// Same shape as on_ompt_callback_sync_region, but only reads task_data->value
// (never writes it), printing the wait-phase begin/end events.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if (kind == ompt_sync_region_barrier_implicit)
        printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      if (kind == ompt_sync_region_barrier_implicit)
        printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
               ompt_get_thread_data()->value,
               (parallel_data)?parallel_data->value:0, task_data->value,
               codeptr_ra);
      break;
  }
}

// Registers callback `name` with function type `type`; prints a diagnostic
// (matched by the CHECK-NOT lines above) if the runtime rejects it.
#define register_callback_t(name, type)                       \
do{                                                           \
  type f_##name = &on_##name;                                 \
  if (ompt_set_callback(name, (ompt_callback_t)f_##name) ==   \
      ompt_set_never)                                         \
    printf("0: Could not register callback '" #name "'\n");   \
}while(0)

#define register_callback(name) register_callback_t(name, name##_t)

int ompt_initialize(
  ompt_function_lookup_t lookup,
  ompt_data_t *tool_data)
{
  ompt_set_callback_t ompt_set_callback;
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  register_callback(ompt_callback_sync_region);
  // The wait callback has the same signature as the sync_region callback.
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback(ompt_callback_thread_begin);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}

void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}

// Tool entry point the OpenMP runtime looks up to attach this tool.
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
special_accumulation_ops.h
//
// @author raver119@gmail.com
//

#ifndef LIBND4J_SPECIAL_ACCUMULATION_OPS_H
#define LIBND4J_SPECIAL_ACCUMULATION_OPS_H

#include <templatemath.h>
#include <helpers/TAD.h>
//#include <ops/ops.h>
//#include <loops/reduce.h>

namespace simdOps {

    // Numerically stable log-sum-exp reduction along TADs.
    // Precondition (stated below in execSpecialCuda): `result` already holds
    // the per-TAD maximum m, so the op accumulates sum(exp(x - m)) and
    // postProcess returns m + log(sum) — the standard log-sum-exp trick.
    template<typename T>
    class LogSumExp {
    public:
        static const bool requiresSpecialAccumulation = true;

        // Accumulator starts at 0 (we sum exponentials).
        op_def static T startingValue(const T *input) {
            return (T) 0.0f;
        }

        // Partial sums combine by plain addition.
        op_def static T merge(T old, T opOutput, T *extraParams) {
            return opOutput + old;
        }

        op_def static T update(T old, T opOutput, T *extraParams) {
            return opOutput + old;
        }

        // exp(x - m) with the max m passed as the second operand...
        op_def static T op(T d1, T d2) {
            return nd4j::math::nd4j_exp<T>(d1 - d2);
        }

        // ...or via extraParams[0].
        op_def static T op(T d1, T* extraParams) {
            return nd4j::math::nd4j_exp<T>(d1 - extraParams[0]);
        }

        // m + log(sum(exp(x - m))).
        op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
            return extraParams[0] + nd4j::math::nd4j_log<T>(reduction);
        }

#ifdef __CUDACC__
        // Tree-reduce `numItems` partial sums held in shared memory into
        // sPartials[0]. Only threads with tid < numItems hold valid data.
        __device__ static inline void aggregatePartials(T *sPartials, int tid, int numItems, T *extraParams) {
            // start the shared memory loop on the next power of 2 less
            // than the block size.  If block size is not a power of 2,
            // accumulate the intermediate sums in the remainder range.
            int floorPow2 = numItems;

            if (floorPow2 & (floorPow2 - 1)) {
                while (floorPow2 & (floorPow2 - 1)) {
                    floorPow2 &= floorPow2 - 1;
                }
                // Fold the tail [floorPow2, numItems) back into [0, floorPow2).
                if (tid >= floorPow2) {
                    sPartials[tid - floorPow2] = update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
                }
                __syncthreads();
            }

            // Classic halving tree reduction.
            for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
                if (tid < activeThreads && tid + activeThreads < numItems) {
                    sPartials[tid] = update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
                }
                __syncthreads();
            }
        }

        // One block per TAD (grid-strided over TADs); threads of a block
        // cooperatively sum exp(x - max) over the TAD, then thread 0 writes
        // the final log-sum-exp into result[r].
        static inline __device__ void execSpecialCuda(
                T *dx, Nd4jLong *xShapeInfo,
                T *extraParams,
                T *result, Nd4jLong *resultShapeInfo,
                int *dimension, int dimensionLength,
                T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {
            // we assume that RESULT already holds max values

            //shared memory space for storing intermediate results
            __shared__ T *sPartials;

            //        __shared__ shape::TAD *tad;
            __shared__ Nd4jLong tadLength;
            __shared__ Nd4jLong tadRank;
            __shared__ Nd4jLong numTads;
            __shared__ Nd4jLong *tadShape;
            __shared__ Nd4jLong *tadStride;

            // Thread 0 computes per-block invariants; broadcast via shared mem.
            if (threadIdx.x == 0) {
                extern __shared__ unsigned char shmem[];
                sPartials = (T *) shmem;
                tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
                tadRank = shape::rank(tadOnlyShapeInfo);
                numTads = shape::length(xShapeInfo) / tadLength;

                tadShape = shape::shapeOf(tadOnlyShapeInfo);
                tadStride = shape::stride(tadOnlyShapeInfo);
            }
            __syncthreads();

            Nd4jLong xCoord[MAX_RANK];

            for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
                auto tadOffsetForBlock = tadOffsets[r];

                sPartials[threadIdx.x] = startingValue(dx + tadOffsetForBlock);

                // Each thread accumulates a strided slice of the TAD;
                // result[r] supplies the max used for stabilization.
                for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    shape::ind2subC(tadRank, tadShape, i, xCoord);
                    auto xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);

                    sPartials[threadIdx.x] = update(sPartials[threadIdx.x], op(dx[xOffset], result[r]), extraParams);
                }
                __syncthreads();

                // aggregate. do NOT reduce for elements > tadLength
                aggregatePartials(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), &result[r]);

                __syncthreads();
                // Thread 0 holds the full sum in sPartials[0]; note result[r]
                // (the max) is read by postProcess before being overwritten.
                if (threadIdx.x == 0)
                    result[r] = postProcess(sPartials[threadIdx.x], tadLength, &result[r]);
            }
        }
#endif

        // Host-side equivalent: for each TAD, sum exp(x - result[i]) and write
        // result[i] = max + log(sum). Builds a TAD helper only if one was not
        // supplied by the caller.
        static void execSpecial(T *x,
                                Nd4jLong *xShapeInfo,
                                T *extraParams,
                                T *result,
                                Nd4jLong *resultShapeInfoBuffer,
                                int *dimension,
                                int dimensionLength,
                                Nd4jLong *tadShapeInfo,
                                Nd4jLong *tadOffset) {
            Nd4jLong resultLength = shape::length(resultShapeInfoBuffer);

            auto tadOnlyShapeInfo = tadShapeInfo;
            auto tadOffsets = tadOffset;
            shape::TAD *tad = nullptr;

            if (tadOnlyShapeInfo == nullptr || tadOffsets == nullptr) {
                tad = new shape::TAD(xShapeInfo, dimension, dimensionLength);
                tad->createTadOnlyShapeInfo();
                tad->createOffsets();

                // Degenerate dimension spec: nothing to reduce over.
                if (tad->dimensionLength < 1) {
                    delete tad;
                    return;
                }

                tadOnlyShapeInfo = tad->tadOnlyShapeInfo;
                tadOffsets = tad->tadOffsets;
            }

            const Nd4jLong tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
            auto numTads = shape::length(xShapeInfo) / tadLength;
            auto tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);

            // Thread count scales with the number of output elements.
            int tadsPerThread = resultLength / TAD_THRESHOLD;
            int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
            num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

            if (tadEWS > 0 && (numTads == 1 || shape::isVector(tadOnlyShapeInfo) || shape::isScalar(tadOnlyShapeInfo))) {
                // Fast path: TAD elements are laid out with a constant stride.

#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
                for (int i = 0; i < resultLength; i++) {

                    T *iter = x + tadOffsets[i];
                    T start = startingValue(iter);
                    if (tadEWS == 1) {
                        for (int j = 0; j < tadLength; j++) {
                            start = update(start, op(iter[j], result[i]), extraParams);
                        }
                    } else {
                        for (int j = 0; j < tadLength; j++) {
                            start = update(start, op(iter[j * tadEWS], result[i]), extraParams);
                        }
                    }
                    result[i] = postProcess(start, tadLength, &result[i]);
                }
            } else {
                // General path: compute each element's offset from coordinates.
                auto tadShape = shape::shapeOf(tadOnlyShapeInfo);
                auto tadStride = shape::stride(tadOnlyShapeInfo);
                auto tadRank = shape::rank(tadOnlyShapeInfo);

#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
                for (int i = 0; i < resultLength; i++) {
                    auto offset = tadOffsets[i];
                    Nd4jLong xCoord[MAX_RANK];

                    T start = startingValue(x + offset);

                    for (int j = 0; j < tadLength; j++) {
                        shape::ind2subC(tadRank, tadShape, j, xCoord);
                        auto xOffset = shape::getOffset(offset, tadShape, tadStride, xCoord, tadRank);

                        start = update(start, op(x[xOffset], result[i]), extraParams);
                    }

                    result[i] = postProcess(start, tadLength, &result[i]);;
                }
            }

            // Only free the TAD helper if we allocated it above.
            if (tad != nullptr)
                delete tad;
        }
    };
}

#endif //LIBND4J_SPECIAL_ACCUMULATION_OPS_H
hmm.c
/*
 * Copyright (C) 2017 by Benedict Paten (benedictpaten@gmail.com)
 *
 * Released under the MIT license, see LICENSE.txt
 */

#include "margin.h"

// OpenMP
#if defined(_OPENMP)
#include <omp.h>
#define CELL_BUFFER_SIZE 1000
#endif

inline double logAddP(double a, double b, bool maxNotSum) {
    /*
     * Local function for doing addition of logs or (if doing Viterbi style calculation), to take the max.
     */
    return maxNotSum ? (a > b ? a : b) : stMath_logAddExact(a, b);
}

/*
 * Functions for the read partitioning hmm object stRPHmm.
 */

/*
 * Frees the parameters object, including its four substitution matrices.
 */
void stRPHmmParameters_destruct(stRPHmmParameters *params) {
    free(params->hetSubModel);
    free(params->hetSubModelSlow);
    free(params->readErrorSubModel);
    free(params->readErrorSubModelSlow);
    free(params);
}

/*
 * Prints the (log-space) slow matrix as probabilities, one row per source character.
 * NOTE(review): matrixFast is unused here — presumably kept for signature symmetry
 * with setter functions; confirm before removing.
 */
static void printMatrix(FILE *fH, double *matrixSlow, uint16_t *matrixFast) {
    for(int64_t i=0; i<ALPHABET_SIZE; i++) {
        fprintf(fH, "\t\t\t");
        for(int64_t j=0; j<ALPHABET_SIZE; j++) {
            fprintf(fH, " %f, ", exp(matrixSlow[i*ALPHABET_SIZE + j]));
        }
        fprintf(fH, "\n");
    }
}

double *getColumnBaseComposition(stRPColumn *column, int64_t pos) {
    /*
     * Get the observed counts for each base seen at a particular position in a column
     */
    double *baseCounts = st_calloc(ALPHABET_SIZE, sizeof(double));
    for (int64_t i=0; i<column->depth; i++) {
        stProfileSeq *seq = column->seqHeaders[i];

        // Only count sequences that actually cover position pos.
        if (pos >= seq->refStart && pos < seq->length+seq->refStart) {
            for(int64_t j=0; j<ALPHABET_SIZE; j++) {
                baseCounts[j] += getProb(&(seq->profileProbs[(pos - seq->refStart) * ALPHABET_SIZE]), j);
            }
        }
    }
    return baseCounts;
}

double *getProfileSequenceBaseCompositionAtPosition(stSet *profileSeqs, int64_t pos) {
    /*
     * Get the expected count of each alphabet character in the profile sequences, returned
     * as an array.
     * Caller owns (and must free) the returned array of length ALPHABET_SIZE.
     */
    double *baseCounts = st_calloc(ALPHABET_SIZE, sizeof(double));
    stSetIterator *it = stSet_getIterator(profileSeqs);
    stProfileSeq *pSeq;
    while((pSeq = stSet_getNext(it)) != NULL) {
        if (pos >= pSeq->refStart && pos < pSeq->refStart+pSeq->length) {
            for(int64_t j=0; j<ALPHABET_SIZE; j++) {
                baseCounts[j] += getProb(&(pSeq->profileProbs[(pos - pSeq->refStart)*ALPHABET_SIZE]), j);
            }
        }
    }
    return baseCounts;
}

void stRPHmmParameters_printParameters(stRPHmmParameters *params, FILE *fH) {
    /*
     * Print the parameters in the parameters object in a human readable form.
     */
    fprintf(fH, "\tRead Partitioning HMM Parameters\n");
    fprintf(fH, "\t\tAlphabet_size: %i\n"
            "\t\tMax_read coverage_depth: %" PRIi64 "\n"
            "\t\tMax_not sum transitions?: %i\n"
            "\t\tMax_partitions in a column of an HMM: %" PRIi64 "\n"
            "\t\tMin read coverage to support phasing between heterozygous sites: %" PRIi64 "\n",
            ALPHABET_SIZE, params->maxCoverageDepth, (int)params->maxNotSumTransitions,
            params->maxPartitionsInAColumn, params->minReadCoverageToSupportPhasingBetweenHeterozygousSites);

    fprintf(fH, "\t\tHeterozygous substitution rates:\n");
    printMatrix(fH, params->hetSubModelSlow, params->hetSubModel);
    fprintf(fH, "\t\tRead error substitution rates:\n");
    printMatrix(fH, params->readErrorSubModelSlow, params->readErrorSubModel);

    fprintf(fH, "\t\tIterations of parameter learning: %" PRIi64 "\n", params->trainingIterations);
    fprintf(fH, "\t\tInclude deletions as gap character? : %i\n", (int) params->gapCharactersForDeletions);
    fprintf(fH, "\t\tUse reference prior?: %i\n", (int) params->useReferencePrior);
    fprintf(fH, "\t\tFilter bad reads?: %i\n", (int)params->filterBadReads);
    fprintf(fH, "\t\tFilter match threshold: %f\n", params->filterMatchThreshold);
    fprintf(fH, "\t\tFilter reads with any of these sam flags set: %d\n", params->filterAReadWithAnyOneOfTheseSamFlagsSet);
    fprintf(fH, "\t\tInclude inverted partitions?: %i\n", (int) params->includeInvertedPartitions);
    fprintf(fH, "\t\tEstimate read error probs empirically?: %i\n", (int) params->estimateReadErrorProbsEmpirically);
    fprintf(fH, "\t\tFiltering likely homoygous sites? : %i\n", (int)params->filterLikelyHomozygousSites);
    fprintf(fH, "\t\tminSecondMostFrequentBaseFilter: %f\n", params->minSecondMostFrequentBaseFilter);
    fprintf(fH, "\t\tminSecondMostFrequentBaseLogProbFilter: %f\n", params->minSecondMostFrequentBaseLogProbFilter);
    fprintf(fH, "\t\tRounds of iterative refinement: %" PRIi64 "\n", params->roundsOfIterativeRefinement);
    fprintf(fH, "\t\tWriting gvcf? : %i\n", (int)params->writeGVCF);
    fprintf(fH, "\t\tVerbose Attributes:\n");
    if (params->verboseTruePositives) fprintf(fH, "\t\t\tTRUE_POSITIVES\n");
    if (params->verboseFalsePositives) fprintf(fH, "\t\t\tFALSE_POSITIVES\n");
    if (params->verboseFalseNegatives) fprintf(fH, "\t\t\tFALSE_NEGATIVES\n");
}

static void calculateReadErrorSubModel(double *readErrorSubModel, int64_t refStart, int64_t length,
        uint64_t *haplotypeSeq, stSet *reads) {
    /*
     * Returns a normalized substitution matrix estimating the probability of read error substitutions by ML.
     * (Accumulates expected haplotype->read substitution counts into readErrorSubModel;
     * normalisation is done separately by normaliseSubstitutionMatrix.)
     */
    stSetIterator *readIt = stSet_getIterator(reads);
    stProfileSeq *pSeq;
    int64_t end = refStart + length;
    while((pSeq = stSet_getNext(readIt)) != NULL) {
        // Get the overlapping interval
        int64_t i = refStart > pSeq->refStart ? refStart : pSeq->refStart;
        int64_t j = end < pSeq->refStart + pSeq->length ? end : pSeq->refStart + pSeq->length;

        // For each pair of read and haplotype characters
        for(;i<j;i++) {

            // Check coordinates in bounds
            assert(i - refStart >= 0 && i-refStart < length);
            assert(i - pSeq->refStart >= 0 && i - pSeq->refStart < pSeq->length);

            int64_t hapChar = haplotypeSeq[i - refStart];
            for(int64_t readChar=0; readChar<ALPHABET_SIZE; readChar++) {
                double probOfReadChar = getProb(&(pSeq->profileProbs[(i-pSeq->refStart) * ALPHABET_SIZE]), readChar);
                *getSubstitutionProbSlow(readErrorSubModel, hapChar, readChar) += probOfReadChar;
            }
        }
    }
    stSet_destructIterator(readIt);
}

void normaliseSubstitutionMatrix(double *subMatrix) {
    /*
     * Normalise matrix so that counts are converted to conditional probabilities of observing
     * derived character given source character.
     */
    for(int64_t fromChar=0; fromChar<ALPHABET_SIZE; fromChar++) {
        double totalSubCount = 0.0;
        for(int64_t toChar=0; toChar<ALPHABET_SIZE; toChar++) {
            totalSubCount += *getSubstitutionProbSlow(subMatrix, fromChar, toChar);
        }
        for(int64_t toChar=0; toChar<ALPHABET_SIZE; toChar++) {
            // Floor each probability at 0.0001 to avoid zero (and hence -inf log) entries.
            double p = *getSubstitutionProbSlow(subMatrix, fromChar, toChar) / totalSubCount;
            *getSubstitutionProbSlow(subMatrix, fromChar, toChar) = p <= 0.0001 ? 0.0001 : p;
        }
    }
}

void stRPHmmParameters_setReadErrorSubstitutionParameters(stRPHmmParameters *params, double *readErrorSubModel) {
    /*
     * Set the substitution parameters of the read error substitution model from the given matrix.
     */
    for(int64_t j=0; j<ALPHABET_SIZE; j++) {
        for(int64_t k=0; k<ALPHABET_SIZE; k++) {
            setSubstitutionProb(params->readErrorSubModel, params->readErrorSubModelSlow, j, k,
                    *getSubstitutionProbSlow(readErrorSubModel, j, k));
        }
    }
}

double *getEmptyReadErrorSubstitutionMatrix(stRPHmmParameters *params) {
    /*
     * Get an empty substitution matrix initializaed with the pseudo counts specified by params.
     * Caller owns the returned ALPHABET_SIZE x ALPHABET_SIZE matrix.
     */
    double *readErrorSubModel = st_calloc(ALPHABET_SIZE * ALPHABET_SIZE, sizeof(double));
    // Off-diagonal pseudo counts everywhere first...
    for(int64_t j=0; j<ALPHABET_SIZE*ALPHABET_SIZE; j++) {
        readErrorSubModel[j] = params->offDiagonalReadErrorPseudoCount;
    }
    // ...then overwrite the diagonal.
    for(int64_t j=0; j<ALPHABET_SIZE; j++) {
        readErrorSubModel[j*ALPHABET_SIZE + j] = params->onDiagonalReadErrorPseudoCount;
    }
    return readErrorSubModel;
}

void stRPHmmParameters_learnParameters(stRPHmmParameters *params, stList *profileSequences,
        stHash *referenceNamesToReferencePriors) {
    /*
     * Learn the substitution matrices iteratively, updating the params object in place.
     * Iterations is the number of cycles of stochastic parameter search to do.
     */
    // For each iteration construct a set of HMMs and estimate the parameters from it.
    for(int64_t i=0; i<params->trainingIterations; i++) {
        st_logDebug("\tStarting training iteration %" PRIi64 "\n", i);

        // Substitution model for haplotypes to reads
        double *readErrorSubModel = getEmptyReadErrorSubstitutionMatrix(params);

        stList *hmms = getRPHmms(profileSequences, referenceNamesToReferencePriors, params);
        for(int64_t j=0; j<stList_length(hmms); j++) {
            stRPHmm *hmm = stList_get(hmms, j);

            // Run the forward-backward algorithm
            stRPHmm_forwardBackward(hmm);

            // Now compute a high probability path through the hmm
            stList *path = stRPHmm_forwardTraceBack(hmm);

            // Compute the genome fragment
            stGenomeFragment *gF = stGenomeFragment_construct(hmm, path);

            // Get partitioned sequences
            stSet *reads1 = stRPHmm_partitionSequencesByStatePath(hmm, path, 1);
            stSet *reads2 = stRPHmm_partitionSequencesByStatePath(hmm, path, 0);

            // Estimate the read error substitution parameters
            calculateReadErrorSubModel(readErrorSubModel, gF->refStart, gF->length, gF->haplotypeString1, reads1);
            calculateReadErrorSubModel(readErrorSubModel, gF->refStart, gF->length, gF->haplotypeString2, reads2);

            // Cleanup
            stSet_destruct(reads1);
            stSet_destruct(reads2);
            stGenomeFragment_destruct(gF);
            stList_destruct(path);
        }

        // Cleanup
        stList_destruct(hmms);

        // Normalise the probabilities
        normaliseSubstitutionMatrix(readErrorSubModel);

        // Update the read error substitution parameters of the parameters object
        stRPHmmParameters_setReadErrorSubstitutionParameters(params, readErrorSubModel);

        // Cleanup
        free(readErrorSubModel);

        //Log the parameters info
        if(st_getLogLevel() == debug) {
            st_logDebug("\tParameters learned after iteration %" PRIi64 " of training:\n", i);
            stRPHmmParameters_printParameters(params, stderr);
        }
    }
}

// Three-way comparison of two int64_t values (-1/0/1).
static int cmpint64(int64_t i, int64_t j) {
    return i > j ? 1 : i < j ? -1 : 0;
}

inline int stRPHmm_cmpFn(const void *a, const void *b) {
    /*
     * Compares two read partitioning HMMs by coordinate on the reference.
     * Will return equal only if they are the same HMM, with the same memory
     * address, otherwise compares pointers for equal HMMs.
     */
    stRPHmm *hmm1 = (stRPHmm *)a, *hmm2 = (stRPHmm *)b;
    int i = strcmp(hmm1->referenceName, hmm2->referenceName);
    if(i == 0) {
        i = cmpint64(hmm1->refStart, hmm2->refStart);
        if(i == 0) {
            // Sort by descending order of length
            i = cmpint64(hmm2->refLength, hmm1->refLength);
            if(i == 0) {
                // Tie-break on address so distinct objects never compare equal.
                i = hmm1 > hmm2 ? 1 : (hmm1 < hmm2 ? -1 : 0);
            }
        }
    }
    return i;
}

stRPHmm *stRPHmm_construct(stProfileSeq *profileSeq, stReferencePriorProbs *referencePriorProbs,
        stRPHmmParameters *params) {
    /*
     * Create a read partitioning HMM representing the single sequence profile.
     */

    stRPHmm *hmm = st_calloc(1, sizeof(stRPHmm));

    //  Set reference coordinates
    hmm->referenceName = stString_copy(profileSeq->referenceName);
    hmm->refStart = profileSeq->refStart;
    hmm->refLength = profileSeq->length;

    // Add the single profile sequence to the list of the hmm's sequences
    hmm->profileSeqs = stList_construct();
    stList_append(hmm->profileSeqs, profileSeq);

    hmm->parameters = params; // Parameters for the model for computation, this is shared by different HMMs

    hmm->referencePriorProbs = referencePriorProbs;
    // The hmm's interval must lie within the reference prior's interval.
    assert(stString_eq(hmm->referenceName, referencePriorProbs->referenceName));
    assert(hmm->refStart >= referencePriorProbs->refStart);
    assert(hmm->refStart + hmm->refLength <= referencePriorProbs->refStart + referencePriorProbs->length);

    hmm->columnNumber = 1; // The number of columns in the model, initially just 1
    hmm->maxDepth = 1; // The maximum number of states in a column, initially just 1

    // Create the first column of the model
    stProfileSeq **seqHeaders = st_malloc(sizeof(stProfileSeq *));
    seqHeaders[0] = profileSeq;
    uint8_t **seqs = st_malloc(sizeof(uint8_t *));
    seqs[0] = profileSeq->profileProbs;
    stRPColumn *column = stRPColumn_construct(hmm->refStart, hmm->refLength, 1, seqHeaders, seqs, referencePriorProbs);
    hmm->firstColumn = column;
    hmm->lastColumn = column;

    // Add two cells to the column to represent the two possible partitions of the single profile sequence
    stRPCell *cell = stRPCell_construct(1);
    column->head = cell;
    cell->nCell = stRPCell_construct(0);

    return hmm;
}

void stRPHmm_destruct(stRPHmm *hmm, bool destructColumns) {
    /*
     * Free memory owned by the hmm, including columns.
     */
    free(hmm->referenceName);
    stList_destruct(hmm->profileSeqs);

    if(destructColumns) {
        // Cleanup the columns of the hmm
        stRPColumn *column = hmm->firstColumn;
        while(1) {
            stRPMergeColumn *mColumn = column->nColumn;
            stRPColumn_destruct(column);
            // The last column has no following merge column.
            if(mColumn == NULL) {
                break;
            }
            column = mColumn->nColumn;
            stRPMergeColumn_destruct(mColumn);
        }
    }

    free(hmm);
}

void stRPHmm_destruct2(stRPHmm *hmm) {
    /*
     * Cleans up hmm and columns
     */
    stRPHmm_destruct(hmm, 1);
}

stList *stRPHmm_forwardTraceBack(stRPHmm *hmm) {
    /*
     * Traces back through the forward matrix picking the most probable path.
     * (yes, this is non-symmetric)
     * Returns the result as a list of cells, one from each column.
     */
    stList *path = stList_construct();

    stRPColumn *column = hmm->lastColumn;

    // Pick cell in the last column with highest probability
    stRPCell *cell = column->head;
    double maxProb = cell->forwardLogProb;
    stRPCell *maxCell = cell;
    while((cell = cell->nCell) != NULL) {
        if(cell->forwardLogProb > maxProb) {
            maxProb = cell->forwardLogProb;
            maxCell = cell;
        }
    }

    stList_append(path, maxCell); // Add chosen cell to output

    // Walk back through previous columns
    while(column->pColumn != NULL) {
        // Get previous merge cell
        stRPMergeCell *mCell = stRPMergeColumn_getPreviousMergeCell(maxCell, column->pColumn);
        assert(mCell != NULL);

        // Switch to previous column
        column = column->pColumn->pColumn;

        // Walk through cells in the previous column to find the one with the
        // highest forward probability that transitions to maxCell
        cell = column->head;
        maxCell = NULL;
        maxProb = ST_MATH_LOG_ZERO;
        do {
            // If compatible and has greater probability
            if(stRPMergeColumn_getNextMergeCell(cell, column->nColumn) == mCell && cell->forwardLogProb > maxProb) {
                maxProb = cell->forwardLogProb;
                maxCell = cell;
            }
        } while((cell = cell->nCell) != NULL);

        assert(maxCell != NULL);
        stList_append(path, maxCell);
    }

    stList_reverse(path); // So cells go in order

    return path;
}

stSet *stRPHmm_partitionSequencesByStatePath(stRPHmm *hmm, stList *path, bool partition1) {
    /*
     * For an hmm and path through the hmm (e.g. computed with stRPHmm_forwardTraceBack) returns the
     * set of sequences in the hmm that are predicted to come from one given haplotype.
     */

    stSet *seqsInHap1 = stSet_construct();

    // For each cell/column pair
    stRPColumn *column = hmm->firstColumn;
    for(int64_t i=0; i<stList_length(path); i++) {
        stRPCell *cell = stList_get(path, i);

        // Get sequences in first or second partition
        for(int64_t j=0; j<column->depth; j++) {
            if((seqInHap1(cell->partition, j) && partition1) ||
                    (!seqInHap1(cell->partition, j) && !partition1)) {
                stSet_insert(seqsInHap1, column->seqHeaders[j]);
                // todo add to readHaplotypes
            }
        }

        if(column->nColumn != NULL) {
            column = column->nColumn->nColumn;
        }
    }

    return seqsInHap1;
}

void stRPHmm_print(stRPHmm *hmm, FILE *fileHandle, bool includeColumns, bool includeCells) {
    /*
     * Prints a debug friendly representation of the state of an hmm.
     */
    //Header line
    fprintf(fileHandle, "HMM REF_NAME: %s REF_START: %" PRIi64 " REF_LENGTH %" PRIi64
            " COLUMN_NUMBER %" PRIi64 " MAX_DEPTH: %" PRIi64 " FORWARD_PROB: %f BACKWARD_PROB: %f\n",
            hmm->referenceName, hmm->refStart, hmm->refLength,
            hmm->columnNumber, hmm->maxDepth,
            (float)hmm->forwardLogProb, (float)hmm->backwardLogProb);

    if(includeColumns) {
        stRPColumn *column = hmm->firstColumn;
        int64_t i=0;
        while(1) {
            fprintf(fileHandle, "Column %" PRIi64 "\n", i++);

            // Print the column
            stRPColumn_print(column, fileHandle, includeCells);

            if(column->nColumn == NULL) {
                break;
            }

            // Print the merge column
            stRPMergeColumn_print(column->nColumn, fileHandle, includeCells);

            column = column->nColumn->nColumn;
        }
    }
}

stRPHmm *stRPHmm_fuse(stRPHmm *leftHmm, stRPHmm *rightHmm) {
    /*
     * Fuses together two hmms, such that leftHmm and rightHMM
     * are on the same reference sequence and non-overlapping and
     * left hmm precedes right hmm on the reference sequence.
     * Returns fused hmm, destroys input hmms in the process.
     */

    // Checks
    if(!stString_eq(leftHmm->referenceName, rightHmm->referenceName)) {
        st_errAbort("Attempting to fuse two hmms not on the same reference sequence");
    }
    if(stRPHmm_overlapOnReference(leftHmm, rightHmm)) {
        st_errAbort("Attemping to fuse two hmms that overlap in reference coordinates");
    }
    if(leftHmm->refStart >= rightHmm->refStart) {
        st_errAbort("Left hmm does not precede right hmm in reference coordinates for merge");
    }

    // Create a new empty hmm
    stRPHmm *hmm = st_malloc(sizeof(stRPHmm));

    // Set the reference interval
    hmm->referenceName = stString_copy(leftHmm->referenceName);
    hmm->refStart = leftHmm->refStart;
    hmm->refLength = rightHmm->refStart + rightHmm->refLength - leftHmm->refStart;

    // Create the combined list of profile seqs
    hmm->profileSeqs = stList_copy(leftHmm->profileSeqs, NULL);
    stList_appendAll(hmm->profileSeqs, rightHmm->profileSeqs);

    // Set column number
    hmm->columnNumber = leftHmm->columnNumber + rightHmm->columnNumber;

    // Max depth
    hmm->maxDepth = leftHmm->maxDepth > rightHmm->maxDepth ? leftHmm->maxDepth : rightHmm->maxDepth;

    // Parameters
    if(leftHmm->parameters != rightHmm->parameters) {
        st_errAbort("HMM parameters differ in fuse function, panic.");
    }
    hmm->parameters = leftHmm->parameters;

    // Set reference position prior probabilities
    if(leftHmm->referencePriorProbs != rightHmm->referencePriorProbs) {
        st_errAbort("Hmm reference prior probs differ in fuse function, panic.");
    }
    hmm->referencePriorProbs = leftHmm->referencePriorProbs;

    // Make columns to fuse left hmm and right hmm's columns
    stRPMergeColumn *mColumn = stRPMergeColumn_construct(0, 0);

    // Links
    leftHmm->lastColumn->nColumn = mColumn;
    mColumn->pColumn = leftHmm->lastColumn;

    // Add merge cell to connect the cells in the two columns
    stRPMergeCell_construct(0, 0, mColumn);

    // If there is a gap between the two hmms on the reference, insert an
    // empty column (and second merge column) spanning it.
    int64_t gapLength = rightHmm->refStart - (leftHmm->refStart + leftHmm->refLength);
    assert(gapLength >= 0);
    if(gapLength > 0) {
        // Make column in the gap
        stRPColumn *column = stRPColumn_construct(leftHmm->refStart + leftHmm->refLength,
                gapLength, 0, NULL, NULL, hmm->referencePriorProbs);

        // Links
        mColumn->nColumn = column;
        column->pColumn = mColumn;

        // Make cell for empty column
        column->head = stRPCell_construct(0);

        // Add right merge column
        mColumn = stRPMergeColumn_construct(0, 0);

        // Add merge cell to connect the cells in the two columns
        stRPMergeCell_construct(0, 0, mColumn);

        // Links
        column->nColumn = mColumn;
        mColumn->pColumn = column;

        // Increase the column number to account for the introduced gap column
        hmm->columnNumber += 1;
    }
    mColumn->nColumn = rightHmm->firstColumn;
    rightHmm->firstColumn->pColumn = mColumn;

    // Initialise first/last columns of fused hmm
    hmm->firstColumn = leftHmm->firstColumn;
    hmm->lastColumn = rightHmm->lastColumn;

    // Cleanup (columns are now owned by the fused hmm, so don't destruct them)
    stRPHmm_destruct(leftHmm, 0);
    stRPHmm_destruct(rightHmm, 0);

    return hmm;
}

void stRPHmm_alignColumns(stRPHmm *hmm1, stRPHmm *hmm2) {
    /*
     * Align the input hmms, modifying them in place, so that they each
     * (1) span the same reference interval,
     * (2) have the same number of columns, and
     * (3) so that for all i, column i in each model span the same interval.
     */
    assert(hmm1 != hmm2);

    // If the two hmms don't overlap in reference space then complain
    if(!stRPHmm_overlapOnReference(hmm1, hmm2)) {
        st_errAbort("Attempting to align two HMMs that do not overlap in reference coordinate space");
    }

    // If hmm1 starts after hmm2 then call the other way around
    if(hmm1->refStart > hmm2->refStart) {
        stRPHmm_alignColumns(hmm2, hmm1);
        return;
    }

    // If hmm1 starts before hmm2 add an empty prefix interval to hmm2
    // so they have the same start coordinate
    if(hmm1->refStart < hmm2->refStart) {
        // Create column
        stRPColumn *column = stRPColumn_construct(hmm1->refStart, hmm2->refStart - hmm1->refStart,
                0, NULL, NULL, hmm1->referencePriorProbs);
        // Add cell
        column->head = stRPCell_construct(0);
        // Create merge column
        stRPMergeColumn *mColumn = stRPMergeColumn_construct(0,0);
        // Add merge cell
        stRPMergeCell_construct(0, 0, mColumn);
        // Create links
        hmm2->firstColumn->pColumn = mColumn;
        mColumn->nColumn = hmm2->firstColumn;
        mColumn->pColumn = column;
        column->nColumn = mColumn;
        assert(column->pColumn == NULL);
        hmm2->firstColumn = column;
        //Adjust start and length of hmm2 interval
        hmm2->refLength += hmm2->refStart - hmm1->refStart;
        hmm2->refStart = hmm1->refStart;
        // Increase column number
        hmm2->columnNumber++;
    }

    // If hmm1 has a shorter reference interval length than hmm2 then call the function
    // with the hmms reversed.
    if(hmm1->refLength < hmm2->refLength) {
        stRPHmm_alignColumns(hmm2, hmm1);
        return;
    }

    // If hmm1 has a longer reference interval than hmm2 append an empty suffix
    // interval to hmm2 to make them the same length.
    if(hmm1->refLength > hmm2->refLength) {
        // Create column
        stRPColumn *column = stRPColumn_construct(hmm2->lastColumn->refStart + hmm2->lastColumn->length,
                hmm1->refLength - hmm2->refLength, 0, NULL, NULL, hmm1->referencePriorProbs);
        // Add cell
        column->head = stRPCell_construct(0);
        // Create merge column
        stRPMergeColumn *mColumn = stRPMergeColumn_construct(0, 0);
        // Add merge cell
        stRPMergeCell_construct(0, 0, mColumn);
        // Create links
        hmm2->lastColumn->nColumn = mColumn;
        mColumn->pColumn = hmm2->lastColumn;
        mColumn->nColumn = column;
        column->pColumn = mColumn;
        assert(column->nColumn == NULL);
        hmm2->lastColumn = column;
        //Adjust start and length of hmm2 interval
        hmm2->refLength = hmm1->refLength;
        // Increase column number
        hmm2->columnNumber++;
    }

    // Quick coordinate checks
    assert(hmm1->refStart == hmm2->refStart);
    assert(hmm1->refLength == hmm2->refLength);
    assert(hmm1->firstColumn->refStart == hmm1->refStart);
    assert(hmm2->firstColumn->refStart == hmm2->refStart);
    assert(hmm1->lastColumn->refStart + hmm1->lastColumn->length == hmm1->refStart + hmm1->refLength);
    assert(hmm2->lastColumn->refStart + hmm2->lastColumn->length == hmm2->refStart + hmm2->refLength);

    // At this point both hmms have the same reference interval

    // While one hmm has a shorter reference interval than the other split the other interval
    // otherwise move on to the next
    stRPColumn *column1 = hmm1->firstColumn;
    stRPColumn *column2 = hmm2->firstColumn;
    while(1) {
        assert(column1->refStart == column2->refStart);

        if(column1->length > column2->length) {
            stRPColumn_split(column1, column2->length, hmm1);
            assert(column1->nColumn->nColumn->refStart == column1->refStart + column2->length);
        }
        else if(column1->length < column2->length) {
            stRPColumn_split(column2, column1->length, hmm2);
        }
        assert(column1->refStart == column2->refStart);
        assert(column1->length == column2->length); // Now have equal length/start

        // There are no more columns, so break
        if(column1->nColumn == NULL) {
            assert(hmm1->lastColumn == column1);
            assert(column2->nColumn == NULL);
            assert(hmm2->lastColumn == column2);
            break;
        }

        column1 = column1->nColumn->nColumn;
        assert(column2->nColumn != NULL);
        column2 = column2->nColumn->nColumn;
        assert(column1 != NULL);
        assert(column2 != NULL);
    }

    assert(hmm1->columnNumber == hmm2->columnNumber);
}

// Hash/equality functions for stHash keyed on uint64_t partitions.
static uint64_t intHashFn(const void *a) {
    return *(uint64_t *)a;
}

static int intEqualsFn(const void *key1, const void *key2) {
    return *(uint64_t *)key1 == *(uint64_t *)key2;
}

stRPCell **makeCell(uint64_t partition, stRPCell **pCell, stHash *seen) {
    /*
     * Make a cell for a column.
     * Records the partition in `seen` (keyed on the cell's own partition field),
     * links the cell via *pCell and returns the address of its nCell link so
     * the caller can continue appending.
     */
    // Make the cell
    stRPCell *cell = stRPCell_construct(partition);

    // Add the partition to those already seen
    assert(stHash_search(seen, &cell->partition) == NULL);
    stHash_insert(seen, &cell->partition, cell);

    // Link cells
    *pCell = cell;

    return &cell->nCell;
}

stRPHmm *stRPHmm_createCrossProductOfTwoAlignedHmm(stRPHmm *hmm1, stRPHmm *hmm2) {
    /*
     * For two aligned hmms (see stRPHmm_alignColumns) returns a new hmm that represents the
     * cross product of all the states of the two input hmms.
     */

    // Do sanity checks that the two hmms have been aligned
    if(!stString_eq(hmm1->referenceName, hmm2->referenceName)) {
        st_errAbort("Trying to create cross product of two HMMs "
                "on different reference sequences");
    }
    if(hmm1->refStart != hmm2->refStart) {
        st_errAbort("Trying to create cross product of two HMMs "
                "with different reference interval starts");
    }
    if(hmm1->refLength != hmm2->refLength) {
        st_errAbort("Trying to create cross product of two HMMs "
                "with different reference interval length");
    }
    if(hmm1->columnNumber != hmm2->columnNumber) {
        st_errAbort("Trying to create cross product of two HMMs "
                "with different column numbers");
    }

    // Create a new empty hmm
    stRPHmm *hmm = st_calloc(1, sizeof(stRPHmm));
    // Set the reference interval
    hmm->referenceName = stString_copy(hmm1->referenceName);
    hmm->refStart = hmm1->refStart;
    hmm->refLength = hmm1->refLength;
    // Create the combined list of profile seqs
    hmm->profileSeqs = stList_copy(hmm1->profileSeqs, NULL);
    stList_appendAll(hmm->profileSeqs, hmm2->profileSeqs);
    // Set column number
    hmm->columnNumber = hmm1->columnNumber;
    // Set substitution matrices
    if(hmm1->parameters != hmm2->parameters) {
        st_errAbort("Hmm parameters differ in fuse function, panic.");
    }
    hmm->parameters = hmm1->parameters;

    // Set reference position prior probabilities
    if(hmm1->referencePriorProbs != hmm2->referencePriorProbs) {
        st_errAbort("Hmm reference prior probs differ in hmm cross product function, panic.");
    }
    hmm->referencePriorProbs = hmm1->referencePriorProbs;

    // For each pair of corresponding columns
    stRPColumn *column1 = hmm1->firstColumn;
    stRPColumn *column2 = hmm2->firstColumn;
    assert(column1 != NULL);
    assert(column2 != NULL);
    stRPMergeColumn *mColumn = NULL;

    while(1) {
        // Check columns aligned
        assert(column1->refStart == column2->refStart);
        assert(column1->length == column2->length);

        // Create the new column

        // Depth
        int64_t newColumnDepth = column1->depth+column2->depth;
        if(newColumnDepth > hmm->maxDepth) {
            hmm->maxDepth =
newColumnDepth; } // Seq headers stProfileSeq **seqHeaders = st_malloc(sizeof(stProfileSeq *) * newColumnDepth); memcpy(seqHeaders, column1->seqHeaders, sizeof(stProfileSeq *) * column1->depth); memcpy(&seqHeaders[column1->depth], column2->seqHeaders, sizeof(stProfileSeq *) * column2->depth); // Profiles uint8_t **seqs = st_malloc(sizeof(uint8_t *) * newColumnDepth); memcpy(seqs, column1->seqs, sizeof(uint8_t *) * column1->depth); memcpy(&seqs[column1->depth], column2->seqs, sizeof(uint8_t *) * column2->depth); stRPColumn *column = stRPColumn_construct(column1->refStart, column1->length, newColumnDepth, seqHeaders, seqs, hmm->referencePriorProbs); // If the there is a previous column if(mColumn != NULL) { mColumn->nColumn = column; column->pColumn = mColumn; } else { hmm->firstColumn = column; assert(column->pColumn == NULL); } // Create cross product of columns stRPCell **pCell = &column->head; stRPCell *cell1 = column1->head; // includeInvertedPartitions forces that the partition and its inverse are included // in the resulting combine hmm. 
if(hmm->parameters->includeInvertedPartitions) { stHash *seen = stHash_construct3(intHashFn, intEqualsFn, NULL, NULL); do { stRPCell *cell2 = column2->head; do { uint64_t partition = mergePartitionsOrMasks(cell1->partition, cell2->partition, column1->depth, column2->depth); // We have not seen the combined partition before if(stHash_search(seen, &partition) == NULL) { // Add the partition to the column pCell = makeCell(partition, pCell, seen); // Check if the column has non-zero depth and only add the inverse partition if it does // because if zero length the inverse partition is the same as for the forward, and therefore // a duplicate if(newColumnDepth > 0) { uint64_t invertedPartition = invertPartition(partition, newColumnDepth); assert(stHash_search(seen, &invertedPartition) == NULL); pCell = makeCell(invertedPartition, pCell, seen); } } } while((cell2 = cell2->nCell) != NULL); } while((cell1 = cell1->nCell) != NULL); // Cleanup stHash_destruct(seen); } // If not forcing symmetry else { do { stRPCell *cell2 = column2->head; do { stRPCell *cell = stRPCell_construct(mergePartitionsOrMasks(cell1->partition, cell2->partition, column1->depth, column2->depth)); // Link cells *pCell = cell; pCell = &cell->nCell; } while((cell2 = cell2->nCell) != NULL); } while((cell1 = cell1->nCell) != NULL); } // Get the next merged column stRPMergeColumn *mColumn1 = column1->nColumn; stRPMergeColumn *mColumn2 = column2->nColumn; // If column is NULL, we have reached the last column // and we can exit if(mColumn1 == NULL) { assert(mColumn2 == NULL); assert(hmm1->lastColumn == column1); assert(hmm2->lastColumn == column2); // Set the last column pointer hmm->lastColumn = column; break; } // Create new merged column uint64_t fromMask = mergePartitionsOrMasks(mColumn1->maskFrom, mColumn2->maskFrom, mColumn1->pColumn->depth, mColumn2->pColumn->depth); uint64_t toMask = mergePartitionsOrMasks(mColumn1->maskTo, mColumn2->maskTo, mColumn1->nColumn->depth, mColumn2->nColumn->depth); 
assert(popcount64(fromMask) == popcount64(toMask)); mColumn = stRPMergeColumn_construct(fromMask, toMask); // Connect links mColumn->pColumn = column; column->nColumn = mColumn; // Create cross product of merged columns stHashIterator *cellIt1 = stHash_getIterator(mColumn1->mergeCellsFrom); stRPMergeCell *mCell1; while((mCell1 = stHash_getNext(cellIt1)) != NULL) { stHashIterator *cellIt2 = stHash_getIterator(mColumn2->mergeCellsFrom); stRPMergeCell *mCell2; while((mCell2 = stHash_getNext(cellIt2)) != NULL) { uint64_t fromPartition = mergePartitionsOrMasks(mCell1->fromPartition, mCell2->fromPartition, mColumn1->pColumn->depth, mColumn2->pColumn->depth); uint64_t toPartition = mergePartitionsOrMasks(mCell1->toPartition, mCell2->toPartition, mColumn1->nColumn->depth, mColumn2->nColumn->depth); assert(popcount64(fromPartition) == popcount64(toPartition)); // includeInvertedPartitions forces that the partition and its inverse are included // in the resulting combined hmm. if(hmm->parameters->includeInvertedPartitions) { if(stHash_search(mColumn->mergeCellsFrom, &fromPartition) == NULL) { stRPMergeCell_construct(fromPartition, toPartition, mColumn); // If the mask includes no sequences then the the inverted will be identical, so we check // to avoid adding the same partition twice if(popcount64(fromMask) > 0) { uint64_t invertedFromPartition = mColumn->maskFrom & invertPartition(fromPartition, mColumn1->pColumn->depth + mColumn2->pColumn->depth); uint64_t invertedToPartition = mColumn->maskTo & invertPartition(toPartition, mColumn1->nColumn->depth + mColumn2->nColumn->depth); stRPMergeCell_construct(invertedFromPartition, invertedToPartition, mColumn); } } } else { stRPMergeCell_construct(fromPartition, toPartition, mColumn); } } stHash_destructIterator(cellIt2); } stHash_destructIterator(cellIt1); // Get next column column1 = mColumn1->nColumn; column2 = mColumn2->nColumn; assert(column1 != NULL); assert(column2 != NULL); } return hmm; } static void 
stRPHmm_initialiseProbs(stRPHmm *hmm) { /* * Initialize the forward and backward matrices. */ // Initialize total forward and backward probabilities hmm->forwardLogProb = ST_MATH_LOG_ZERO; hmm->backwardLogProb = ST_MATH_LOG_ZERO; // Iterate through columns from first to last stRPColumn *column = hmm->firstColumn; while(1) { // Set total log prob column->totalLogProb = ST_MATH_LOG_ZERO; // Initialise cells in the column stRPCell *cell = column->head; do { cell->forwardLogProb = ST_MATH_LOG_ZERO; cell->backwardLogProb = ST_MATH_LOG_ZERO; } while((cell = cell->nCell) != NULL); if(column->nColumn == NULL) { break; } // Initialise cells in the next merge column stList *mergeCells = stHash_getValues(column->nColumn->mergeCellsFrom); for(int64_t i=0; i<stList_length(mergeCells); i++) { stRPMergeCell *mergeCell = stList_get(mergeCells, i); mergeCell->forwardLogProb = ST_MATH_LOG_ZERO; mergeCell->backwardLogProb = ST_MATH_LOG_ZERO; } stList_destruct(mergeCells); column = column->nColumn->nColumn; } } static inline void forwardCellCalc1(stRPHmm *hmm, stRPColumn *column, stRPCell *cell, uint64_t *bitCountVectors) { // If the previous merge column exists then propagate forward probability from merge state if(column->pColumn != NULL) { stRPMergeCell *mCell = stRPMergeColumn_getPreviousMergeCell(cell, column->pColumn); cell->forwardLogProb = mCell->forwardLogProb; } // Otherwise initialize probability with log(1.0) else { cell->forwardLogProb = ST_MATH_LOG_ONE; } // Calculate the emission prob double emissionProb = emissionLogProbability(column, cell, bitCountVectors, hmm->referencePriorProbs, (stRPHmmParameters *)hmm->parameters); // Add emission prob to forward log prob cell->forwardLogProb += emissionProb; // Store the emission probability for the cell in the backwardLogProb field temporarily // (is corrected during the backward pass) cell->backwardLogProb = emissionProb; } static inline void forwardCellCalc2(stRPHmm *hmm, stRPColumn *column, stRPCell *cell) { // If the next 
merge column exists then propagate forward probability to the merge state if (column->nColumn != NULL) { // Add to the next merge cell stRPMergeCell *mCell = stRPMergeColumn_getNextMergeCell(cell, column->nColumn); mCell->forwardLogProb = logAddP(mCell->forwardLogProb, cell->forwardLogProb, hmm->parameters->maxNotSumTransitions); } else { // Else propagate probability to total forward probability of model hmm->forwardLogProb = logAddP(hmm->forwardLogProb, cell->forwardLogProb, hmm->parameters->maxNotSumTransitions); } } static void stRPHmm_forward(stRPHmm *hmm) { /* * Forward algorithm for hmm. */ stRPColumn *column = hmm->firstColumn; // Iterate through columns from first to last while(1) { // Get the bit count vectors for the column uint64_t *bitCountVectors = calculateCountBitVectors(column->seqs, column->depth, column->activePositions, column->totalActivePositions); // Iterate through states in column stRPCell *cell = column->head; // If OpenMP is available then parallelize the calculation of the emission calcs #if defined(_OPENMP) stRPCell *cells[CELL_BUFFER_SIZE]; do { // Get as many cells as the buffer will fit / there are cells int64_t cellsInBuffer=0; do { cells[cellsInBuffer++] = cell; } while((cell = cell->nCell) != NULL && cellsInBuffer < CELL_BUFFER_SIZE); #pragma omp parallel { #pragma omp for for(int64_t i=0; i<cellsInBuffer; i++) { forwardCellCalc1(hmm, column, cells[i], bitCountVectors); } } for(int64_t i=0; i<cellsInBuffer; i++) { forwardCellCalc2(hmm, column, cells[i]); } } while(cell != NULL); #else // Otherwise do it without the need for the cell buffer do { forwardCellCalc1(hmm, column, cell, bitCountVectors); forwardCellCalc2(hmm, column, cell); } while((cell = cell->nCell) != NULL); #endif // Cleanup the bit count vectors free(bitCountVectors); if(column->nColumn == NULL) { break; } column = column->nColumn->nColumn; } } static inline void backwardCellCalc(stRPHmm *hmm, stRPColumn *column, stRPCell *cell) { // Retrieve the emission 
probability that was stored by the forward pass double probabilityToPropagateLogProb = cell->backwardLogProb; // If the next merge column exists then propagate backward probability from merge state if(column->nColumn != NULL) { stRPMergeCell *mCell = stRPMergeColumn_getNextMergeCell(cell, column->nColumn); cell->backwardLogProb = mCell->backwardLogProb; probabilityToPropagateLogProb += mCell->backwardLogProb; } else { // Else set the backward prob to log(1) cell->backwardLogProb = ST_MATH_LOG_ONE; } // If the previous merge column exists then propagate backward probability to the merge state if(column->pColumn != NULL) { // Add to the previous merge cell stRPMergeCell *mCell = stRPMergeColumn_getPreviousMergeCell(cell, column->pColumn); mCell->backwardLogProb = logAddP(mCell->backwardLogProb, probabilityToPropagateLogProb, hmm->parameters->maxNotSumTransitions); } else { hmm->backwardLogProb = logAddP(hmm->backwardLogProb, probabilityToPropagateLogProb, hmm->parameters->maxNotSumTransitions); } // Add to column total probability column->totalLogProb = logAddP(column->totalLogProb, cell->forwardLogProb + cell->backwardLogProb, hmm->parameters->maxNotSumTransitions); } static void stRPHmm_backward(stRPHmm *hmm) { /* * Backward algorithm for hmm. */ stRPColumn *column = hmm->lastColumn; // Iterate through columns from last to first while(1) { // Iterate through states in column stRPCell *cell = column->head; do { backwardCellCalc(hmm, column, cell); } while((cell = cell->nCell) != NULL); if(column->pColumn == NULL) { break; } column = column->pColumn->pColumn; } } void stRPHmm_forwardBackward(stRPHmm *hmm) { /* * Runs the forward and backward algorithms and sets the total column probabilities. * * This function must be run upon an HMM to calculate cell posterior probabilities. 
*/
    // Initialise state values
    stRPHmm_initialiseProbs(hmm);

    // Run the forward and backward passes
    stRPHmm_forward(hmm);
    stRPHmm_backward(hmm);
}

static int cellCmpFn(const void *a, const void *b, const void *extraArg) {
    /*
     * Sort cells by posterior probability in descending order.
     * extraArg is the stRPColumn the cells belong to.
     */
    stRPCell *cell1 = (stRPCell *)a, *cell2 = (stRPCell *)b;
    stRPColumn *column = (stRPColumn *)extraArg;
    double p1 = stRPCell_posteriorProb(cell1, column), p2 = stRPCell_posteriorProb(cell2, column);
    return p1 > p2 ? -1 : p1 < p2 ? 1 : 0;
}

static int mergeCellCmpFn(const void *a, const void *b, const void *extraArg) {
    /*
     * Sort merge cells by posterior probability in descending order.
     * extraArg is the stRPMergeColumn the merge cells belong to.
     */
    stRPMergeCell *cell1 = (stRPMergeCell *)a, *cell2 = (stRPMergeCell *)b;
    stRPMergeColumn *column = (stRPMergeColumn *)extraArg;
    double p1 = stRPMergeCell_posteriorProb(cell1, column), p2 = stRPMergeCell_posteriorProb(cell2, column);
    return p1 > p2 ? -1 : p1 < p2 ? 1 : 0;
}

void filterMergeCells(stRPMergeColumn *mColumn, stSet *chosenMergeCellsSet) {
    /*
     * Removes merge cells from the column that are not in chosenMergeCellsSet.
     * Removed cells are destructed; the column's two hashes stay consistent.
     */
    assert(stSet_size(chosenMergeCellsSet) > 0);
    stList *mergeCells = stHash_getValues(mColumn->mergeCellsFrom);
    for(int64_t i=0; i<stList_length(mergeCells); i++) {
        stRPMergeCell *mCell = stList_get(mergeCells, i);
        assert(mCell != NULL);
        if(stSet_search(chosenMergeCellsSet, mCell) == NULL) {
            // Remove the state from the merge column: each merge cell is keyed by
            // fromPartition in mergeCellsFrom and by toPartition in mergeCellsTo
            assert(stHash_search(mColumn->mergeCellsFrom, &(mCell->fromPartition)) == mCell);
            assert(stHash_search(mColumn->mergeCellsTo, &(mCell->toPartition)) == mCell);
            stHash_remove(mColumn->mergeCellsFrom, &(mCell->fromPartition));
            stHash_remove(mColumn->mergeCellsTo, &(mCell->toPartition));

            // Cleanup
            stRPMergeCell_destruct(mCell);
        }
    }
    stList_destruct(mergeCells);
    assert(stSet_size(chosenMergeCellsSet) == stHash_size(mColumn->mergeCellsFrom));
    assert(stSet_size(chosenMergeCellsSet) == stHash_size(mColumn->mergeCellsTo));
}

stSet
*getLinkedMergeCells(stRPMergeColumn *mColumn,
        stRPMergeCell *(*getNCell)(stRPCell *, stRPMergeColumn *), stList *cells) {
    /*
     * Returns the set of merge cells in mColumn that are linked, via the getNCell
     * accessor, to at least one cell in the list 'cells'.
     */
    stSet *chosenMergeCellsSet = stSet_construct();
    for(int64_t i=0; i<stList_length(cells); i++) {
        // Look up the merge cell linked to this cell; it must exist
        stRPMergeCell *mCell = getNCell(stList_get(cells, i), mColumn);
        assert(mCell != NULL);
        stSet_insert(chosenMergeCellsSet, mCell);
    }
    assert(stSet_size(chosenMergeCellsSet) > 0);
    return chosenMergeCellsSet;
}

void relinkCells(stRPColumn *column, stList *cells) {
    /*
     * Re-links the cells in the list 'cells' to make up the linked list of cells
     * in the column, in list order.
     */
    stRPCell **pCell = &column->head; // Pointer to the previous link pointer,
                                      // used to rebuild the linked list in place
    for(int64_t i=0; i<stList_length(cells); i++) {
        stRPCell *cell = stList_get(cells, i);
        *pCell = cell;
        pCell = &cell->nCell;
    }
    *pCell = NULL; // Terminate the list
    assert(column->head != NULL);
}

stList *getLinkedCells(stRPColumn *column,
        stRPMergeCell *(*getPCell)(stRPCell *, stRPMergeColumn *),
        stRPMergeColumn *mColumn) {
    /*
     * Returns the set of cells in column that are linked to a cell in mColumn.
*/
    // Put cells into an array and sort by descending posterior prob,
    // only keeping cells that still have a preceding merge cell
    stList *cells = stList_construct();
    stRPCell *cell = column->head;
    do {
        if(mColumn == NULL || getPCell(cell, mColumn) != NULL) {
            stList_append(cells, cell);
            cell = cell->nCell;
        }
        else {
            // Cell has no linked merge cell: unlink it and destroy it
            stRPCell *nCell = cell->nCell;
            stRPCell_destruct(cell);
            cell = nCell;
        }
    } while(cell != NULL);
    stList_sort2(cells, cellCmpFn, column);
    assert(stList_length(cells) > 0);
    return cells;
}

void stRPHmm_pruneForwards(stRPHmm *hmm) {
    /*
     * Remove cells from hmm whose posterior probability is below the given threshold,
     * sweeping from the first column to the last.
     */
    // For each column
    stRPColumn *column = hmm->firstColumn;
    stRPMergeColumn *mColumn = NULL;
    while(1) {
        assert(column->head != NULL);

        // Get cells that have a valid previous cell
        stList *cells = getLinkedCells(column, stRPMergeColumn_getPreviousMergeCell, mColumn);

        // Get rid of the excess cells: keep at least minPartitionsInAColumn, and drop the
        // least probable while over maxPartitionsInAColumn or below the posterior threshold
        while(stList_length(cells) > hmm->parameters->minPartitionsInAColumn &&
                (stList_length(cells) > hmm->parameters->maxPartitionsInAColumn ||
                 stRPCell_posteriorProb(stList_peek(cells), column) < hmm->parameters->minPosteriorProbabilityForPartition)) {
            stRPCell_destruct(stList_pop(cells));
        }

        // Relink the cells (from most probable to least probable)
        relinkCells(column, cells);

        // Move on to the next merge column
        mColumn = column->nColumn;
        if(mColumn == NULL) {
            assert(column == hmm->lastColumn);
            stList_destruct(cells);
            break;
        }

        // Get merge cells that are connected to a cell in the previous column
        stSet *chosenMergeCellsSet = getLinkedMergeCells(mColumn, stRPMergeColumn_getNextMergeCell, cells);

        // Shrink the number of chosen merge cells to less than or equal to the desired number
        stList *chosenMergeCellsList = stSet_getList(chosenMergeCellsSet);
        stList_sort2(chosenMergeCellsList, mergeCellCmpFn, mColumn);
        while(stList_length(chosenMergeCellsList) > hmm->parameters->minPartitionsInAColumn &&
                (stList_length(chosenMergeCellsList) > hmm->parameters->maxPartitionsInAColumn
|| stRPMergeCell_posteriorProb(stList_peek(chosenMergeCellsList), mColumn) < hmm->parameters->minPosteriorProbabilityForPartition)) {
            // Discard the least probable chosen merge cell
            stSet_remove(chosenMergeCellsSet, stList_pop(chosenMergeCellsList));
        }
        assert(stList_length(chosenMergeCellsList) == stSet_size(chosenMergeCellsSet));
        stList_destruct(chosenMergeCellsList);

        // Get rid of merge cells we don't need
        filterMergeCells(mColumn, chosenMergeCellsSet);

        // Cleanup
        stList_destruct(cells);
        stSet_destruct(chosenMergeCellsSet);

        column = mColumn->nColumn;
    }
}

void stRPHmm_pruneBackwards(stRPHmm *hmm) {
    /*
     * Remove cells from hmm whose posterior probability is below the given threshold,
     * sweeping from the last column to the first. Assumes stRPHmm_pruneForwards has
     * already run (the asserts below rely on its winnowing).
     */
    // For each column
    stRPColumn *column = hmm->lastColumn;
    stRPMergeColumn *mColumn = NULL;
    while(1) {
        assert(column->head != NULL);

        // Get cells that have a valid next merge cell
        stList *cells = getLinkedCells(column, stRPMergeColumn_getNextMergeCell, mColumn);

        // This must be true because the forward pass has already winnowed the number below the
        // threshold
        assert(stList_length(cells) <= hmm->parameters->maxPartitionsInAColumn);

        // Relink the cells (from most probable to least probable)
        relinkCells(column, cells);

        // Move on to the previous merge column
        mColumn = column->pColumn;
        if(mColumn == NULL) {
            assert(column == hmm->firstColumn);
            stList_destruct(cells);
            break;
        }

        // Get merge cells that are connected to a cell in the following column
        stSet *chosenMergeCellsSet = getLinkedMergeCells(mColumn, stRPMergeColumn_getPreviousMergeCell, cells);

        // By the same logic, this number is pruned on the forwards pass
        assert(stSet_size(chosenMergeCellsSet) <= hmm->parameters->maxPartitionsInAColumn);

        // Get rid of merge cells we don't need
        filterMergeCells(mColumn, chosenMergeCellsSet);

        // Cleanup
        stList_destruct(cells);
        stSet_destruct(chosenMergeCellsSet);

        column = mColumn->pColumn;
    }
}

void stRPHmm_prune(stRPHmm *hmm) {
    // Prune in both directions so surviving cells remain linked both ways
    stRPHmm_pruneForwards(hmm);
    stRPHmm_pruneBackwards(hmm);
}

bool stRPHmm_overlapOnReference(stRPHmm *hmm1, stRPHmm *hmm2) {
    /*
     *
Return non-zero iff hmm1 and hmm2 have the same reference sequence and overlapping
     * coordinates intervals on that reference sequence.
     */
    // If either interval is zero length this is not a well defined comparison
    if(hmm1->refLength <= 0 || hmm2->refLength <= 0) {
        st_errAbort("Trying to compare HMMs with a zero length coordinate interval");
    }

    // Check if on the same reference sequence
    if(!stString_eq(hmm1->referenceName, hmm2->referenceName)) {
        return 0;
    }

    // Check if intervals overlap

    // If hmm1 starts after hmm2's start coordinate then switch hmm1 for hmm2
    if(hmm1->refStart > hmm2->refStart) {
        return stRPHmm_overlapOnReference(hmm2, hmm1);
    }

    // The coordinates of the first interval overlap the second
    return hmm1->refStart + hmm1->refLength > hmm2->refStart;
}

static stRPColumn *getColumn(stRPColumn *column, int64_t site) {
    /*
     * Returns the column containing the given reference position, searching forward
     * from the linked, preceding column "column". Aborts if the site is not contained
     * in any column reachable from "column".
     */
    assert(column != NULL);
    while(1) {
        assert(site >= column->refStart);
        if(site < column->refStart + column->length) {
            return column;
        }
        if(column->nColumn == NULL) {
            break;
        }
        column = column->nColumn->nColumn;
    }
    st_errAbort("Site: %" PRIi64 " not contained in hmm\n", site);
    return column; // NOTE(review): presumed unreachable — st_errAbort is expected to abort
}

void stRPHmm_resetColumnNumberAndDepth(stRPHmm *hmm) {
    /*
     * Walk through the hmm to calculate and set the maxDepth and column number.
     */
    hmm->columnNumber = 0;
    hmm->maxDepth = 0;
    stRPColumn *column = hmm->firstColumn;
    while(1) {
        hmm->columnNumber++;
        if(hmm->maxDepth < column->depth) {
            hmm->maxDepth = column->depth;
        }
        if(column->nColumn == NULL) {
            break;
        }
        column = column->nColumn->nColumn;
    }
}

stRPHmm *stRPHmm_split(stRPHmm *hmm, int64_t splitPoint) {
    /*
     * Splits the hmm into two at the specified point, given by the reference coordinate splitPoint. The return value
     * is the suffix of the split, whose reference start is splitPoint.
     * The prefix of the split is the input hmm, which has its suffix cleaved off.
Its length is then splitPoint-hmm->refStart. */ if(splitPoint <= hmm->refStart) { st_errAbort("The split point is at or before the start of the reference interval\n"); } assert(splitPoint < hmm->refStart + hmm->refLength); if(splitPoint >= hmm->refStart + hmm->refLength) { st_errAbort("The split point %" PRIi64 " is after the last position of the reference interval\n", splitPoint); } stRPHmm *suffixHmm = st_calloc(1, sizeof(stRPHmm)); // Set the reference interval for the two hmms suffixHmm->referenceName = stString_copy(hmm->referenceName); suffixHmm->refStart = splitPoint; suffixHmm->refLength = hmm->refLength + hmm->refStart - splitPoint; hmm->refLength = splitPoint - hmm->refStart; assert(hmm->refLength > 0); assert(suffixHmm->refLength > 0); // Parameters suffixHmm->parameters = hmm->parameters; // Reference prior probabilities suffixHmm->referencePriorProbs = hmm->referencePriorProbs; // Divide the profile sequences between the two hmms (some may end in both if they span the interval) suffixHmm->profileSeqs = stList_construct(); stList *prefixProfileSeqs = stList_construct(); for(int64_t i=0; i<stList_length(hmm->profileSeqs); i++) { stProfileSeq *pSeq = stList_get(hmm->profileSeqs, i); if(pSeq->refStart < splitPoint) { stList_append(prefixProfileSeqs, pSeq); } if(pSeq->refStart + pSeq->length > splitPoint) { stList_append(suffixHmm->profileSeqs, pSeq); } } stList_destruct(hmm->profileSeqs); hmm->profileSeqs = prefixProfileSeqs; // Get the column containing the split point stRPColumn *splitColumn = getColumn(hmm->firstColumn, splitPoint); assert(splitColumn != NULL); assert(splitColumn->refStart <= splitPoint); assert(splitPoint < splitColumn->refStart + splitColumn->length); // If the split point is within the column, split the column if(splitPoint > splitColumn->refStart) { stRPColumn_split(splitColumn, splitPoint-splitColumn->refStart, hmm); splitColumn = splitColumn->nColumn->nColumn; assert(splitPoint == splitColumn->refStart); } // Set links between 
columns suffixHmm->firstColumn = splitColumn; suffixHmm->lastColumn = hmm->lastColumn; hmm->lastColumn = splitColumn->pColumn->pColumn; hmm->lastColumn->nColumn = NULL; stRPMergeColumn_destruct(splitColumn->pColumn); // Cleanup the merge column that is deleted by this pointer setting splitColumn->pColumn = NULL; // Set depth and column numbers stRPHmm_resetColumnNumberAndDepth(hmm); stRPHmm_resetColumnNumberAndDepth(suffixHmm); return suffixHmm; } static bool sitesLinkageIsWellSupported(stRPHmm *hmm, int64_t leftSite, int64_t rightSite) { /* * Returns true if the two sites, specified by reference coordinates leftSite and rightSite, are linked by * hmm->parameters->minReadCoverageToSupportPhasingBetweenHeterozygousSites, otherwise false. */ stRPColumn *leftColumn = getColumn(hmm->firstColumn, leftSite); stRPColumn *rightColumn = getColumn(leftColumn, rightSite); stSet *sequencesInCommon = stRPColumn_getSequencesInCommon(leftColumn, rightColumn); // Condition to determine if well supported by reads bool wellSupported = stSet_size(sequencesInCommon) >= hmm->parameters->minReadCoverageToSupportPhasingBetweenHeterozygousSites; // Cleanup stSet_destruct(sequencesInCommon); return wellSupported; } stList *stRPHMM_splitWherePhasingIsUncertain(stRPHmm *hmm) { /* * Takes the input hmm and splits into a sequence of contiguous fragments covering the same reference interval, * returned as an ordered list of hmm fragments. * Hmms are split where there is insufficient support between heterozygous * sites to support phasing between the two haplotypes. * See sitesLinkageIsWellSupported for details. 
*/ // Run the forward-backward algorithm stRPHmm_forwardBackward(hmm); // Now compute a high probability path through the hmm stList *path = stRPHmm_forwardTraceBack(hmm); // Get two haplotypes for the path through the HMM stGenomeFragment *gF = stGenomeFragment_construct(hmm, path); // Find high confidence heterozygous sites stList *hetSites = stList_construct3(0, (void (*)(void *))stIntTuple_destruct); for(int64_t i=0; i<gF->length; i++) { // If heterozygous site if(gF->haplotypeString1[i] != gF->haplotypeString2[i]) { stList_append(hetSites, stIntTuple_construct1(gF->refStart + i)); } } // Split hmms stList *splitHmms = stList_construct3(0, (void (*)(void *))stRPHmm_destruct2); // For each pair of contiguous het sites if not supported by sufficient reads split the hmm for(int64_t i=0; i<stList_length(hetSites)-1; i++) { int64_t j = stIntTuple_get(stList_get(hetSites, i), 0); int64_t k = stIntTuple_get(stList_get(hetSites, i+1), 0); assert(k > j); // If not well supported by reads if(!sitesLinkageIsWellSupported(hmm, j, k)) { // Split hmm int64_t splitPoint = j+(k-j+1)/2; stRPHmm *rightHmm = stRPHmm_split(hmm, splitPoint); assert(rightHmm->refStart == splitPoint); assert(hmm->refStart + hmm->refLength == splitPoint); // Add prefix of hmm to list of split hmms stList_append(splitHmms, hmm); // Set hmm as right hmm hmm = rightHmm; } } // Add the remaining part of the hmm to split hmms stList_append(splitHmms, hmm); // Cleanup stList_destruct(hetSites); stList_destruct(path); stGenomeFragment_destruct(gF); return splitHmms; } /* * Functions for logging an hmm */ double getExpectedNumberOfMatches(uint64_t *haplotypeString, int64_t start, int64_t length, stProfileSeq *profileSeq) { /* * Returns the expected number of positions in the profile sequence * that are identical to the given haplotype string. 
*/
    double totalExpectedMatches = 0.0;
    for(int64_t i=0; i<profileSeq->length; i++) {
        // Get index into the haplotype string for position i of the profile sequence
        int64_t j = i + profileSeq->refStart - start;
        if(j >= 0 && j < length) { // Only count positions inside the haplotype interval
            uint64_t hapBase = haplotypeString[j];
            assert(hapBase < ALPHABET_SIZE);

            // Expectation of a match
            totalExpectedMatches += getProb(&(profileSeq->profileProbs[i * ALPHABET_SIZE]), hapBase);
        }
    }
    return totalExpectedMatches;
}

double getExpectedIdentity(uint64_t *haplotypeString, int64_t start, int64_t length, stSet *profileSeqs) {
    /*
     * Returns the expected fraction of positions in the profile sequences
     * that match their corresponding position in the given haplotype string.
     * NOTE(review): if profileSeqs is empty the division below is 0.0/0 (NaN) —
     * confirm callers guarantee a non-empty set.
     */
    double totalExpectedNumberOfMatches = 0.0;
    int64_t totalLength = 0;
    stSetIterator *it = stSet_getIterator(profileSeqs);
    stProfileSeq *pSeq;
    while((pSeq = stSet_getNext(it)) != NULL) {
        totalExpectedNumberOfMatches += getExpectedNumberOfMatches(haplotypeString, start, length, pSeq);
        totalLength += pSeq->length;
    }
    stSet_destructIterator(it);
    return totalExpectedNumberOfMatches/totalLength;
}

double getIdentityBetweenHaplotypesExcludingIndels(uint64_t *hap1String, uint64_t *hap2String, int64_t length) {
    /*
     * Returns the fraction of positions in two haplotypes that are identical,
     * excluding mismatching positions where either haplotype holds the last
     * alphabet character (ALPHABET_SIZE-1, treated here as a gap).
     * NOTE(review): if length == numGaps the division below is 0/0 (NaN) — confirm.
     */
    int64_t totalMatches = 0;
    int64_t numGaps = 0;
    for(int64_t i=0; i<length; i++) {
        if(hap1String[i] == hap2String[i]) {
            totalMatches++;
        }
        else if (hap1String[i] == ALPHABET_SIZE-1 || hap2String[i] == ALPHABET_SIZE-1) {
            numGaps++;
        }
    }
    return ((double)totalMatches)/(length - numGaps);
}

double *getHaplotypeBaseComposition(uint64_t *hapString, int64_t length) {
    /*
     * Get the count of each alphabet character in the haplotype sequence, returned
     * as an array of ALPHABET_SIZE doubles. The caller owns (frees) the array.
     */
    double *baseCounts = st_calloc(ALPHABET_SIZE, sizeof(double));
    for(int64_t i=0; i<length; i++) {
        baseCounts[hapString[i]] += 1;
    }
    return baseCounts;
}

void printBaseComposition(FILE *fH, double *baseCounts) {
    /*
     * Print the counts/fraction of each alphabet character.
*/
    double totalCount = 0;
    for(int64_t i=0; i<ALPHABET_SIZE; i++) {
        totalCount += baseCounts[i];
    }
    for(int64_t i=0; i<ALPHABET_SIZE; i++) {
        fprintf(fH, "\t\tBase %" PRIi64 " count: %f fraction: %f\n", i, baseCounts[i], baseCounts[i]/totalCount);
    }
}

double getIdentityBetweenHaplotypes(uint64_t *hap1String, uint64_t *hap2String, int64_t length) {
    /*
     * Returns the fraction of positions in two haplotypes that are identical.
     */
    int64_t totalMatches = 0;
    for(int64_t i=0; i<length; i++) {
        if(hap1String[i] == hap2String[i]) {
            totalMatches++;
        }
    }
    return ((double)totalMatches)/length;
}

double *getExpectedProfileSequenceBaseComposition(stSet *profileSeqs) {
    /*
     * Get the expected count of each alphabet character in the profile sequences,
     * returned as an array of ALPHABET_SIZE doubles. The caller owns (frees) the array.
     */
    double *baseCounts = st_calloc(ALPHABET_SIZE, sizeof(double));
    stSetIterator *it = stSet_getIterator(profileSeqs);
    stProfileSeq *pSeq;
    while((pSeq = stSet_getNext(it)) != NULL) {
        for(int64_t i=0; i<pSeq->length; i++) {
            for(int64_t j=0; j<ALPHABET_SIZE; j++) {
                // Accumulate the probability mass of character j at position i
                baseCounts[j] += getProb(&(pSeq->profileProbs[i*ALPHABET_SIZE]), j);
            }
        }
    }
    stSet_destructIterator(it);
    return baseCounts;
}

void logHmm(stRPHmm *hmm, stSet *reads1, stSet *reads2, stGenomeFragment *gF) {
    /*
     * Print debug-level logging information about an HMM and associated genome fragment.
*/ if(st_getLogLevel() == debug) { st_logDebug("> Creating genome fragment for reference sequence: %s, start: %" PRIi64 ", length: %" PRIi64 "\n", hmm->referenceName, hmm->refStart, hmm->refLength); st_logDebug("\n\tThere are %" PRIi64 " reads covered by the hmm, " "bipartitioned into sets of %" PRIi64 " and %" PRIi64 " reads\n", stList_length(hmm->profileSeqs), stSet_size(reads1), stSet_size(reads2)); // Print the similarity between the two imputed haplotypes sequences st_logDebug("\tThe haplotypes have identity: %f \n", getIdentityBetweenHaplotypes(gF->haplotypeString1, gF->haplotypeString2, gF->length)); st_logDebug("\tIdentity excluding indels: %f \n\n", getIdentityBetweenHaplotypesExcludingIndels(gF->haplotypeString1, gF->haplotypeString2, gF->length)); // Print the base composition of the haplotype sequences double *hap1BaseCounts = getHaplotypeBaseComposition(gF->haplotypeString1, gF->length); st_logDebug("\tThe base composition of haplotype 1:\n"); printBaseComposition(stderr, hap1BaseCounts); free(hap1BaseCounts); double *hap2BaseCounts = getHaplotypeBaseComposition(gF->haplotypeString2, gF->length); st_logDebug("\tThe base composition of haplotype 2:\n"); printBaseComposition(stderr, hap2BaseCounts); free(hap2BaseCounts); // Print the base composition of the reads double *reads1BaseCounts =getExpectedProfileSequenceBaseComposition(reads1); st_logDebug("\tThe base composition of reads1 set:\n"); printBaseComposition(stderr, reads1BaseCounts); free(reads1BaseCounts); double *reads2BaseCounts =getExpectedProfileSequenceBaseComposition(reads2); st_logDebug("\tThe base composition of reads2 set:\n"); printBaseComposition(stderr, reads2BaseCounts); free(reads2BaseCounts); // Print some summary stats about the differences between haplotype sequences and the bipartitioned reads st_logDebug("\thap1 vs. reads1 identity: %f\n", getExpectedIdentity(gF->haplotypeString1, gF->refStart, gF->length, reads1)); st_logDebug("\thap1 vs. 
reads2 identity: %f\n", getExpectedIdentity(gF->haplotypeString1, gF->refStart, gF->length, reads2)); st_logDebug("\thap2 vs. reads2 identity: %f\n", getExpectedIdentity(gF->haplotypeString2, gF->refStart, gF->length, reads2)); st_logDebug("\thap2 vs. reads1 identity: %f\n", getExpectedIdentity(gF->haplotypeString2, gF->refStart, gF->length, reads1)); } }
/* convolution_channel.c */
/*
 * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* CSI-NN2 version 1.12.x */

#include "csi_ref.h"

/* Dequantize one uint8 value using a per-channel scale and zero point. */
static float csi_ref_uint8_to_float_channel(uint8_t i, float scale, int32_t zero_point)
{
    return ((float)i - zero_point) * scale;
}

/* Dequantize one int8 value using a per-channel scale and zero point. */
static float csi_ref_int8_to_float_channel(int8_t i, float scale, int32_t zero_point)
{
    return ((float)i - zero_point) * scale;
}

/* Dequantize a per-output-channel quantized kernel (uint8 or int8) into
 * float_kernel. qinfo[i] holds the quantization parameters of output channel i.
 * Returns CSINN_TRUE on success, CSINN_FALSE for an unsupported kernel dtype.
 * Fix: the original fell off the end of this non-void function without
 * returning a value (undefined behavior when the caller reads the result). */
static int channel_kernel_to_common(struct csi_tensor *float_kernel, struct csi_tensor *o_kernel,
                                    struct conv2d_params *params)
{
    float *float_kernel_data = float_kernel->data;
    int kernel_size = csi_tensor_size(o_kernel);
    int per_channel = kernel_size / o_kernel->dim[0]; /* elements per output channel */
    for (int i = 0; i < o_kernel->dim[0]; i++) {
        for (int j = 0; j < per_channel; j++) {
            int index = i * per_channel + j;
            if (o_kernel->dtype == CSINN_DTYPE_UINT8) {
                uint8_t *kernel_data = o_kernel->data;
                float_kernel_data[index] = csi_ref_uint8_to_float_channel(
                    kernel_data[index], o_kernel->qinfo[i].scale, o_kernel->qinfo[i].zero_point);
            } else if (o_kernel->dtype == CSINN_DTYPE_INT8) {
                int8_t *kernel_data = o_kernel->data;
                float_kernel_data[index] = csi_ref_int8_to_float_channel(
                    kernel_data[index], o_kernel->qinfo[i].scale, o_kernel->qinfo[i].zero_point);
            } else {
                return CSINN_FALSE;
            }
        }
    }
    return CSINN_TRUE; /* was missing in the original */
}

/* Dequantize a per-channel int32 bias into float_bias.
 * bias[i] is scaled by kernel-channel-i scale * input scale. */
static void channel_bias_to_common(struct csi_tensor *float_bias, struct csi_tensor *bias,
                                   struct csi_tensor *input, struct csi_tensor *kernel)
{
    int32_t *bias_data = bias->data;
    float *float_bias_data = float_bias->data;
    int bias_size = csi_tensor_size(bias);
    for (int i = 0; i < bias_size; i++) {
        float_bias_data[i] = bias_data[i] * kernel->qinfo[i].scale * input->qinfo->scale;
    }
}

/* Per-channel quantized NCHW conv2d: dequantize everything to float, run the
 * float reference convolution, then requantize the output. */
static int csi_ref_conv2d_channel_nchw_quant(struct csi_tensor *o_input, struct csi_tensor *o_output,
                                             struct csi_tensor *o_kernel, struct csi_tensor *o_bias,
                                             struct conv2d_params *params)
{
    struct csi_tensor *float_input = csi_ref_convert_float_tensor(o_input);
    struct csi_tensor *float_kernel = csi_ref_alloc_float_tensor(o_kernel);
    struct csi_tensor *float_bias = csi_ref_alloc_float_tensor(o_bias);
    struct csi_tensor *float_output = csi_ref_alloc_float_tensor(o_output);
    channel_kernel_to_common(float_kernel, o_kernel, params);
    channel_bias_to_common(float_bias, o_bias, o_input, o_kernel);
    csi_ref_conv2d_f32(float_input, float_output, float_kernel, float_bias, params);
    csi_tensor_data_convert(o_output, float_output);
    csi_ref_conv_free_float_tensor(float_input, float_output, float_kernel, float_bias);
    return CSINN_TRUE;
}

/* Per-channel quantized depthwise conv2d, uint8, NCHW layout.
 * Converts tensors to NHWC, accumulates in int64, requantizes per output
 * channel with csi_ref_quantize_channel_u8, then converts back to NCHW. */
static int csi_ref_depthwise_conv2d_channel_nchw_u8(struct csi_tensor *o_input,
                                                    struct csi_tensor *o_output,
                                                    struct csi_tensor *o_kernel,
                                                    struct csi_tensor *o_bias,
                                                    struct conv2d_params *params)
{
    struct csi_tensor *input;
    struct csi_tensor *output;
    struct csi_tensor *kernel;
    struct csi_tensor *bias = o_bias;
    input = csi_ref_nchw_to_nhwc_8(o_input);
    kernel = csi_ref_nchw_to_nhwc_8(o_kernel);
    output = csi_ref_nchw_to_nhwc_8(o_output);
    uint8_t *input_data = input->data;
    uint8_t *output_data = output->data;
    uint8_t *kernel_data = kernel->data;
    int32_t *bias_data = bias->data;
    const int32_t dilation_width_factor = params->dilation_width;
    const int32_t dilation_height_factor = params->dilation_height;
    const int32_t batches = input->dim[0];
    const int32_t input_depth = input->dim[3];
    const int32_t output_depth = output->dim[3];
    const int32_t input_height = input->dim[1];
    const int32_t input_width = input->dim[2];
    const int32_t filter_height = kernel->dim[1];
    const int32_t filter_width = kernel->dim[2];
    const int32_t output_height = output->dim[1];
    const int32_t output_width = output->dim[2];
    const int32_t depth_multiplier = output_depth / input_depth;
    const int32_t input_offset = input->qinfo->zero_point;
    /* Note: output zero-point/multiplier/shift are applied inside
     * csi_ref_quantize_channel_u8; the original cached them into unused
     * locals, which have been removed. */

    for (int32_t b = 0; b < batches; ++b) {
#pragma omp parallel for num_threads(8)
        for (int32_t out_y = 0; out_y < output_height; ++out_y) {
            for (int32_t out_x = 0; out_x < output_width; ++out_x) {
                for (int32_t ic = 0; ic < input_depth; ++ic) {
                    for (int32_t m = 0; m < depth_multiplier; m++) {
                        const int32_t oc = m + ic * depth_multiplier;
                        const int32_t in_x_origin = (out_x * params->stride_width) - params->pad_left;
                        const int32_t in_y_origin = (out_y * params->stride_height) - params->pad_top;
                        int64_t acc = 0;
                        for (int32_t filter_y = 0; filter_y < filter_height; ++filter_y) {
                            for (int32_t filter_x = 0; filter_x < filter_width; ++filter_x) {
                                const int32_t in_x = in_x_origin + dilation_width_factor * filter_x;
                                const int32_t in_y = in_y_origin + dilation_height_factor * filter_y;
                                // If the location is outside the bounds of the input image,
                                // use zero as a default value.
                                if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                                    (in_y < input_height)) {
                                    int32_t input_val =
                                        input_data[csi_ref_get_index(input->dim, b, in_y, in_x, ic)];
                                    int32_t filter_val = kernel_data[csi_ref_get_index(
                                        kernel->dim, ic, filter_y, filter_x, m)];
                                    acc += (filter_val - o_kernel->qinfo[oc].zero_point) *
                                           (input_val - input_offset);
                                }
                            }
                        }
                        if (bias->dim_count != 0) {
                            acc += bias_data[oc];
                        }
                        uint8_t out = csi_ref_quantize_channel_u8(acc, input, output,
                                                                  o_kernel->qinfo[oc].scale);
                        output_data[csi_ref_get_index(output->dim, b, out_y, out_x, oc)] = out;
                    }
                }
            }
        }
    }
    csi_ref_nhwc_to_nchw_8(o_output, output);
    csi_mem_free(input->data);
    csi_mem_free(input);
    csi_mem_free(kernel->data);
    csi_mem_free(kernel);
    return CSINN_TRUE;
}

/* Per-channel quantized depthwise conv2d, int8, NCHW layout.
 * Mirrors the uint8 variant above with int8 data and
 * csi_ref_quantize_channel_i8 for requantization. */
static int csi_ref_depthwise_conv2d_channel_nchw_i8(struct csi_tensor *o_input,
                                                    struct csi_tensor *o_output,
                                                    struct csi_tensor *o_kernel,
                                                    struct csi_tensor *o_bias,
                                                    struct conv2d_params *params)
{
    struct csi_tensor *input;
    struct csi_tensor *output;
    struct csi_tensor *kernel;
    struct csi_tensor *bias = o_bias;
    input = csi_ref_nchw_to_nhwc_8(o_input);
    kernel = csi_ref_nchw_to_nhwc_8(o_kernel);
    output = csi_ref_nchw_to_nhwc_8(o_output);
    int8_t *input_data = input->data;
    int8_t *output_data = output->data;
    int8_t *kernel_data = kernel->data;
    int32_t *bias_data = bias->data;
    const int32_t dilation_width_factor = params->dilation_width;
    const int32_t dilation_height_factor = params->dilation_height;
    const int32_t batches = input->dim[0];
    const int32_t input_depth = input->dim[3];
    const int32_t output_depth = output->dim[3];
    const int32_t input_height = input->dim[1];
    const int32_t input_width = input->dim[2];
    const int32_t filter_height = kernel->dim[1];
    const int32_t filter_width = kernel->dim[2];
    const int32_t output_height = output->dim[1];
    const int32_t output_width = output->dim[2];
    const int32_t depth_multiplier = output_depth / input_depth;
    const int32_t input_offset = input->qinfo->zero_point;

    for (int32_t b = 0; b < batches; ++b) {
#pragma omp parallel for num_threads(8)
        for (int32_t out_y = 0; out_y < output_height; ++out_y) {
            for (int32_t out_x = 0; out_x < output_width; ++out_x) {
                for (int32_t ic = 0; ic < input_depth; ++ic) {
                    for (int32_t m = 0; m < depth_multiplier; m++) {
                        const int32_t oc = m + ic * depth_multiplier;
                        const int32_t in_x_origin = (out_x * params->stride_width) - params->pad_left;
                        const int32_t in_y_origin = (out_y * params->stride_height) - params->pad_top;
                        int64_t acc = 0;
                        for (int32_t filter_y = 0; filter_y < filter_height; ++filter_y) {
                            for (int32_t filter_x = 0; filter_x < filter_width; ++filter_x) {
                                const int32_t in_x = in_x_origin + dilation_width_factor * filter_x;
                                const int32_t in_y = in_y_origin + dilation_height_factor * filter_y;
                                // If the location is outside the bounds of the input image,
                                // use zero as a default value.
                                if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                                    (in_y < input_height)) {
                                    int32_t input_val =
                                        input_data[csi_ref_get_index(input->dim, b, in_y, in_x, ic)];
                                    int32_t filter_val = kernel_data[csi_ref_get_index(
                                        kernel->dim, ic, filter_y, filter_x, m)];
                                    acc += (filter_val - o_kernel->qinfo[oc].zero_point) *
                                           (input_val - input_offset);
                                }
                            }
                        }
                        if (bias->dim_count != 0) {
                            acc += bias_data[oc];
                        }
                        int8_t out = csi_ref_quantize_channel_i8(acc, input, output,
                                                                 o_kernel->qinfo[oc].scale);
                        output_data[csi_ref_get_index(output->dim, b, out_y, out_x, oc)] = out;
                    }
                }
            }
        }
    }
    csi_ref_nhwc_to_nchw_8(o_output, output);
    csi_mem_free(input->data);
    csi_mem_free(input);
    csi_mem_free(kernel->data);
    csi_mem_free(kernel);
    return CSINN_TRUE;
}

/* Grouped per-channel quantized conv2d, NCHW: slice input/output/kernel/bias
 * into `group` views and run the plain per-channel conv on each slice.
 * NOTE(review): the four csi_alloc_tensor headers are never released here —
 * presumably tolerated by callers, but worth confirming against the library's
 * ownership conventions. */
static int csi_ref_group_conv2d_channel_nchw_quant(struct csi_tensor *o_input,
                                                   struct csi_tensor *o_output,
                                                   struct csi_tensor *o_kernel,
                                                   struct csi_tensor *o_bias,
                                                   struct conv2d_params *params)
{
    struct csi_tensor *input = csi_alloc_tensor(NULL);
    struct csi_tensor *output = csi_alloc_tensor(NULL);
    struct csi_tensor *kernel = csi_alloc_tensor(NULL);
    struct csi_tensor *bias = csi_alloc_tensor(NULL);
    struct conv2d_params pparams;
    csi_tensor_copy(input, o_input);
    csi_tensor_copy(output, o_output);
    csi_tensor_copy(kernel, o_kernel);
    csi_tensor_copy(bias, o_bias);
    memcpy(&pparams, params, sizeof(struct conv2d_params));
    /* Each per-group view covers 1/group of the channel dimension. */
    input->dim[1] /= params->group;
    output->dim[1] /= params->group;
    kernel->dim[0] /= params->group;
    bias->dim[0] /= params->group;
    pparams.group = 1;
    int input_size = csi_tensor_size(input);
    int output_size = csi_tensor_size(output);
    int kernel_size = csi_tensor_size(kernel);
    int8_t *input_data = o_input->data;
    int8_t *output_data = o_output->data;
    int8_t *kernel_data = o_kernel->data;
    int32_t *bias_data = o_bias->data;
    for (int i = 0; i < params->group; i++) {
        input->data = input_data + i * input_size;
        output->data = output_data + i * output_size;
        kernel->data = kernel_data + i * kernel_size;
        if (bias->data && bias->dim_count != 0) {
            bias->data = bias_data + i * o_output->dim[1] / params->group;
        }
        /* Per-channel quantization info advances with the output channels. */
        kernel->qinfo = o_kernel->qinfo + i * o_output->dim[1] / params->group;
        csi_ref_conv2d_channel_nchw_quant(input, output, kernel, bias, &pparams);
    }
    return CSINN_TRUE;
}

/* Public entry: per-channel quantized conv2d.
 * Fix: the NCHW branch fell through without a return; it now propagates the
 * status of the underlying implementation. */
int csi_ref_conv2d_channel_quant(struct csi_tensor *input, struct csi_tensor *output,
                                 struct csi_tensor *kernel, struct csi_tensor *bias,
                                 struct conv2d_params *params)
{
    if (params->base.layout == CSINN_LAYOUT_NCHW) {
        return csi_ref_conv2d_channel_nchw_quant(input, output, kernel, bias, params);
    } else {
        return CSINN_UNSUPPORT_LAYOUT;
    }
}

/* conv2d + ReLU fused entry. Fixes: propagate the conv status instead of
 * ignoring it, and free the temporary relu_params (was leaked). */
int csi_ref_conv2d_channel_relu_quant(struct csi_tensor *input, struct csi_tensor *output,
                                      struct csi_tensor *kernel, struct csi_tensor *bias,
                                      struct conv2d_params *params)
{
    int ret = csi_ref_conv2d_channel_quant(input, output, kernel, bias, params);
    if (ret != CSINN_TRUE) {
        return ret;
    }
    struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
    memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
    csi_relu_init(output, output, rp);
    csi_relu(output, output, rp);
    csi_mem_free(rp);
    return CSINN_TRUE;
}

/* conv2d + ReLU6 fused entry. Same fixes as the ReLU variant. */
int csi_ref_conv2d_channel_relu6_quant(struct csi_tensor *input, struct csi_tensor *output,
                                       struct csi_tensor *kernel, struct csi_tensor *bias,
                                       struct conv2d_params *params)
{
    int ret = csi_ref_conv2d_channel_quant(input, output, kernel, bias, params);
    if (ret != CSINN_TRUE) {
        return ret;
    }
    struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
    memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
    csi_relu6_init(output, output, rp);
    csi_relu6(output, output, rp);
    csi_mem_free(rp);
    return CSINN_TRUE;
}

/* Public entry: per-channel quantized depthwise conv2d, dispatching on dtype.
 * Fix: the success branches fell through without a return value. */
int csi_ref_depthwise_conv2d_channel_quant(struct csi_tensor *input, struct csi_tensor *output,
                                           struct csi_tensor *kernel, struct csi_tensor *bias,
                                           struct conv2d_params *params)
{
    if (params->base.layout == CSINN_LAYOUT_NCHW) {
        if (input->dtype == CSINN_DTYPE_UINT8) {
            return csi_ref_depthwise_conv2d_channel_nchw_u8(input, output, kernel, bias, params);
        } else if (input->dtype == CSINN_DTYPE_INT8) {
            return csi_ref_depthwise_conv2d_channel_nchw_i8(input, output, kernel, bias, params);
        } else {
            return CSINN_UNSUPPORT_DTYPE;
        }
    } else {
        return CSINN_UNSUPPORT_LAYOUT;
    }
}

/* depthwise conv2d + ReLU fused entry. Fixes: this non-void function had no
 * return at all; also propagates the conv status and frees relu_params. */
int csi_ref_depthwise_conv2d_channel_relu_quant(struct csi_tensor *input,
                                                struct csi_tensor *output,
                                                struct csi_tensor *kernel,
                                                struct csi_tensor *bias,
                                                struct conv2d_params *params)
{
    int ret = csi_ref_depthwise_conv2d_channel_quant(input, output, kernel, bias, params);
    if (ret != CSINN_TRUE) {
        return ret;
    }
    struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
    memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
    csi_relu_init(output, output, rp);
    csi_relu(output, output, rp);
    csi_mem_free(rp);
    return CSINN_TRUE;
}

/* depthwise conv2d + ReLU6 fused entry. Same fixes as the ReLU variant. */
int csi_ref_depthwise_conv2d_channel_relu6_quant(struct csi_tensor *input,
                                                 struct csi_tensor *output,
                                                 struct csi_tensor *kernel,
                                                 struct csi_tensor *bias,
                                                 struct conv2d_params *params)
{
    int ret = csi_ref_depthwise_conv2d_channel_quant(input, output, kernel, bias, params);
    if (ret != CSINN_TRUE) {
        return ret;
    }
    struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
    memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
    csi_relu6_init(output, output, rp);
    csi_relu6(output, output, rp);
    csi_mem_free(rp);
    return CSINN_TRUE;
}

/* Public entry: grouped per-channel quantized conv2d.
 * Fix: the NCHW branch fell through without a return. */
int csi_ref_group_conv2d_channel_quant(struct csi_tensor *input, struct csi_tensor *output,
                                       struct csi_tensor *kernel, struct csi_tensor *bias,
                                       struct conv2d_params *params)
{
    if (params->base.layout == CSINN_LAYOUT_NCHW) {
        return csi_ref_group_conv2d_channel_nchw_quant(input, output, kernel, bias, params);
    } else {
        return CSINN_UNSUPPORT_LAYOUT;
    }
}

/* grouped conv2d + ReLU fused entry. Fixes: missing return, status
 * propagation, relu_params leak. */
int csi_ref_group_conv2d_channel_relu_quant(struct csi_tensor *input, struct csi_tensor *output,
                                            struct csi_tensor *kernel, struct csi_tensor *bias,
                                            struct conv2d_params *params)
{
    int ret = csi_ref_group_conv2d_channel_quant(input, output, kernel, bias, params);
    if (ret != CSINN_TRUE) {
        return ret;
    }
    struct relu_params *rp = csi_mem_alloc(sizeof(struct relu_params));
    memcpy(&(rp->base), &(params->base), sizeof(struct csi_params_base));
    csi_relu_init(output, output, rp);
    csi_relu(output, output, rp);
    csi_mem_free(rp);
    return CSINN_TRUE;
}
/* ocp_nlp_sqp_rti.c */
/*
 * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
 * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
 * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
 * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
 *
 * This file is part of acados.
 *
 * The 2-Clause BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

/*
 * SQP-RTI (real-time iteration) NLP solver: one linearization (preparation
 * step) followed by a single QP solve (feedback step) per sampling instant.
 */

#include "acados/ocp_nlp/ocp_nlp_sqp_rti.h"

// external
#include <assert.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif

// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"

// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"



/************************************************
 * options
 ************************************************/

// Bytes needed for the solver options struct plus the nested nlp options.
acados_size_t ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    acados_size_t size = 0;

    size += sizeof(ocp_nlp_sqp_rti_opts);

    size += ocp_nlp_opts_calculate_size(config, dims);

    return size;
}



// Lay the options struct out in caller-provided raw_memory; the nested nlp
// options are placed immediately after the top-level struct.
void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);

    // sanity: we must not have written past the size we advertised
    assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}



// Set all solver options to their defaults.
void ocp_nlp_sqp_rti_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    // this first !!! (submodule defaults must exist before they can be overwritten)
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);

    // SQP RTI opts
    opts->ext_qp_res = 0;
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;
    opts->print_level = 0;

    return;
}



// Propagate option changes down into the nested nlp options.
void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_update(config, dims, nlp_opts);

    return;
}



// Generic string-keyed option setter. Fields prefixed "qp_" are forwarded to
// the QP solver; the RTI-specific fields are handled here; everything else
// falls through to the nested nlp options.
void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_, const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name (substring before the first '_')
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);

        if (!strcmp(field, "qp_warm_start"))
        {
            // mirrored locally so the feedback step can restore it
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            if (*rti_phase < 0 || *rti_phase > 2) {
                printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0, 1, 2\n");
                exit(1);
            }
            else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            ocp_nlp_opts_set(config, nlp_opts, field, value);
        }
    }

    return;
}



// Forward a per-stage option to the nested nlp options.
void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_, size_t stage,
    const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);
}



/************************************************
 * memory
 ************************************************/

// Bytes needed for solver memory: top-level struct + nested nlp memory +
// the statistics table (stat_m rows of stat_n doubles).
acados_size_t ocp_nlp_sqp_rti_memory_calculate_size(
    void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    acados_size_t size = 0;

    size += sizeof(ocp_nlp_sqp_rti_memory);

    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    int stat_m = 1+1;
    int stat_n = 2;
    if (opts->ext_qp_res)
        stat_n += 4;    // 4 extra columns for external QP residual norms
    size += stat_n*stat_m*sizeof(double);

    size += 8;  // initial align

    make_int_multiple_of(8, &size);

    return size;
}



// Lay solver memory out in caller-provided raw_memory; layout must match
// ocp_nlp_sqp_rti_memory_calculate_size exactly.
void *ocp_nlp_sqp_rti_memory_assign(
    void *config_, void *dims_, void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_memory);

    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);

    // stat
    mem->stat = (double *) c_ptr;
    mem->stat_m = 1+1;
    mem->stat_n = 2;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);

    mem->status = ACADOS_READY;

    assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size(
        config, dims, opts) >= c_ptr);

    return mem;
}



/************************************************
 * workspace
 ************************************************/

// Bytes needed for workspace: nested nlp workspace, scratch QP in/out, and
// (optionally) QP residual storage for debugging.
acados_size_t ocp_nlp_sqp_rti_workspace_calculate_size(
    void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    acados_size_t size = 0;

    // sqp
    size += sizeof(ocp_nlp_sqp_rti_workspace);

    // nlp
    size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}



// Carve the workspace sub-objects out of the raw workspace block; called at
// the top of both preparation and feedback steps (idempotent layout).
static void ocp_nlp_sqp_rti_cast_workspace(
    ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_rti_opts *opts,
    ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_rti_workspace);

    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(
        config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);

    // qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);

        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(
            dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(
            dims->qp_solver->orig_dims);
    }

    assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return;
}



/************************************************
 * functions
 ************************************************/

// Top-level RTI solve. rti_phase selects: 0 = preparation + feedback,
// 1 = preparation only, 2 = feedback only. Returns the status stored in
// memory by the executed step(s).
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_sqp_rti_memory *mem = mem_;

    // zero timers
    acados_timer timer0;
    double total_time = 0.0;
    mem->time_tot = 0.0;

    ocp_nlp_sqp_rti_opts *nlp_opts = opts_;
    int rti_phase = nlp_opts->rti_phase;

    acados_tic(&timer0);

    switch(rti_phase)
    {
        // perform preparation and feedback rti_phase
        case 0:
            ocp_nlp_sqp_rti_preparation_step(
                config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
            ocp_nlp_sqp_rti_feedback_step(
                config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
            break;
        // perform preparation rti_phase
        case 1:
            ocp_nlp_sqp_rti_preparation_step(
                config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
            break;
        // perform feedback rti_phase
        case 2:
            ocp_nlp_sqp_rti_feedback_step(
                config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
            break;
    }

    total_time += acados_toc(&timer0);
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    return mem->status;
}



// RTI preparation step: wire all submodule memory aliases to the current
// iterate/QP storage, initialize the QP, and linearize the NLP (evaluate
// QP matrices). No QP is solved here.
void ocp_nlp_sqp_rti_preparation_step(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer1;

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    mem->time_lin = 0.0;
    mem->time_reg = 0.0;

    int N = dims->N;

    int ii;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(
            nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(
            nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(
            nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(
            nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(
            nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(
            nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(
            nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(
            nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(
            nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(
            nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(
            nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_rev_ptr(
            nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxe_ptr(
            nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
    }

    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(
        dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(
        dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(
        dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(
        dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(
        dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(
        dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(
        dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(
        dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(
        dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // initialize QP
    ocp_nlp_initialize_qp(
        config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

    /* SQP body */
    int sqp_iter = 0;
    // NOTE(review): this stores the address of a stack local into long-lived
    // nlp memory; it dangles once this function returns — presumably only read
    // during the linearization below, but confirm no later consumer exists.
    nlp_mem->sqp_iter = &sqp_iter;

    // linearizate NLP and update QP matrices
    acados_tic(&timer1);
    ocp_nlp_approximate_qp_matrices(
        config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    mem->time_lin += acados_toc(&timer1);

#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif

    return;
}



// RTI feedback step: embed the measured initial state, update QP vectors,
// regularize, solve the single QP, and update the NLP iterate. Sets
// mem->status to ACADOS_SUCCESS or ACADOS_QP_FAILURE.
void ocp_nlp_sqp_rti_feedback_step(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer1;

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int qp_iter = 0;
    int qp_status = 0;
    double tmp_time;

    mem->time_qp_sol = 0.0;
    mem->time_qp_solver_call = 0.0;
    mem->time_qp_xcond = 0.0;
    mem->time_glob = 0.0;

    // embed initial value (this actually updates all bounds at stage 0...)
    ocp_nlp_embed_initial_value(
        config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

    // update QP rhs for SQP (step prim var, abs dual var)
    ocp_nlp_approximate_qp_vectors_sqp(
        config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);

    // regularize Hessian
    // NOTE(review): time_reg is zeroed in the preparation step, not here;
    // a feedback-only call (rti_phase == 2) accumulates onto the value left
    // from the last preparation — confirm this is the intended accounting.
    acados_tic(&timer1);
    config->regularize->regularize_hessian(config->regularize,
        dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);

    if (opts->print_level > 0)
    {
        printf("\n------- qp_in --------\n");
        print_ocp_qp_in(nlp_mem->qp_in);
    }

    // optionally disable warm starting for the (first) QP solve
    if (!opts->warm_start_first_qp)
    {
        int tmp_int = 0;
        config->qp_solver->opts_set(config->qp_solver,
            opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
    }

    // solve qp
    acados_tic(&timer1);
    qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver,
        nlp_mem->qp_in, nlp_mem->qp_out,
        opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
    mem->time_qp_sol += acados_toc(&timer1);

    qp_solver->memory_get(qp_solver,
        nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
    mem->time_qp_solver_call += tmp_time;
    qp_solver->memory_get(qp_solver,
        nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
    mem->time_qp_xcond += tmp_time;

    // compute correct dual solution in case of Hessian regularization
    acados_tic(&timer1);
    config->regularize->correct_dual_sol(config->regularize,
        dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);

    // TODO move into QP solver memory ???
    qp_info *qp_info_;
    ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
    nlp_out->qp_iter = qp_info_->num_iter;
    qp_iter = qp_info_->num_iter;

    // compute external QP residuals (for debugging)
    if (opts->ext_qp_res)
    {
        ocp_qp_res_compute(nlp_mem->qp_in,
            nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
        ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2));
    }

    // save statistics (row 1 of the stat table: status and iteration count)
    mem->stat[mem->stat_n*1+0] = qp_status;
    mem->stat[mem->stat_n*1+1] = qp_iter;

    // intentionally non-short-circuit '&': both operands are 0/1 flags
    if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
    {
#ifndef ACADOS_SILENT
        printf("\nSQP_RTI: QP solver returned error status %d QP iteration %d.\n",
            qp_status, qp_iter);
#endif
        if (opts->print_level > 0)
        {
            printf("\n Failed to solve the following QP:\n");
            print_ocp_qp_in(nlp_mem->qp_in);
        }
        mem->status = ACADOS_QP_FAILURE;
        return;
    }

    // globalization
    acados_tic(&timer1);
    double alpha = ocp_nlp_line_search(
        config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    mem->time_glob += acados_toc(&timer1);

    // update variables
    ocp_nlp_update_variables_sqp(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work, alpha);

    mem->status = ACADOS_SUCCESS;
}



// Precompute: one-time setup before the first solve (definition continues
// beyond this excerpt).
int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims,
opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int N = dims->N; int status = ACADOS_SUCCESS; int ii; // TODO(giaf) flag to enable/disable checks for (ii = 0; ii <= N; ii++) { int module_val; config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val); if (dims->ns[ii] != module_val) { printf("ocp_nlp_sqp_rti_precompute: inconsistent dimension ns \ for stage %d with constraint module, got %d, module: %d.", ii, dims->ns[ii], module_val); exit(1); } } // precompute for (ii = 0; ii < N; ii++) { // set T config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); // dynamics precompute status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], nlp_work->dynamics[ii]); if (status != ACADOS_SUCCESS) return status; } return status; } void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_) { acados_timer timer0; acados_tic(&timer0); ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_out *sens_nlp_out = sens_nlp_out_; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in); d_ocp_qp_set_rhs_zero(work->tmp_qp_in); double one = 1.0; if ((!strcmp("ex", field)) & (stage==0)) { d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in); d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in); // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in); config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, 
nlp_mem->qp_solver_mem, nlp_work->qp_work); // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out); // exit(1); /* copy tmp_qp_out into sens_nlp_out */ int i; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; for (i = 0; i <= N; i++) { blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0); if (i < N) blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0); } } else { printf("\nerror: field %s at stage %d not available in \ ocp_nlp_sqp_rti_eval_param_sens\n", field, stage); exit(1); } mem->time_solution_sensitivities = acados_toc(&timer0); return; } // TODO rename memory_get ??? void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_) { ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_rti_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = 1; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field)) { double *value = return_value_; *value = mem->time_qp_solver_call; } else if (!strcmp("time_qp_xcond", field)) { double *value = return_value_; *value = mem->time_qp_xcond; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("time_glob", field)) { double 
*value = return_value_; *value = mem->time_glob; } else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field)) { double tmp = 0.0; double *ptr = return_value_; int N = dims->N; int ii; *ptr = 0.0; for (ii=0; ii<N; ii++) { config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp); *ptr += tmp; } } else if (!strcmp("time_solution_sensitivities", field)) { double *value = return_value_; *value = mem->time_solution_sensitivities; } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("statistics", field)) { int n_row = 2; double *value = return_value_; for (int ii=0; ii<n_row; ii++) { value[ii+0] = ii; for (int jj=0; jj<mem->stat_n; jj++) value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n]; } } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else if (!strcmp("nlp_mem", field)) { void **value = return_value_; *value = mem->nlp_mem; } else if (!strcmp("qp_xcond_dims", field)) { void **value = return_value_; *value = dims->qp_solver->xcond_dims; } else if (!strcmp("nlp_res", field)) { ocp_nlp_res **value = return_value_; *value = mem->nlp_mem->nlp_res; } else if (!strcmp("qp_xcond_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in; } else if (!strcmp("qp_xcond_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out; } else if (!strcmp("qp_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_in; } else if (!strcmp("qp_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_out; } else if (!strcmp("qp_iter", field)) { config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_); } else if (!strcmp("res_stat", field)) { double *value = 
return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_stat; } else if (!strcmp("res_eq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_eq; } else if (!strcmp("res_ineq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq; } else if (!strcmp("res_comp", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_comp; } else if (!strcmp("cost_value", field)) { double *value = return_value_; *value = mem->nlp_mem->cost_value; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_rti_get\n", field); exit(1); } } void ocp_nlp_sqp_rti_opts_get(void *config_, void *dims_, void *opts_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; if (!strcmp("nlp_opts", field)) { void **value = return_value_; *value = opts->nlp_opts; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_rti_opts_get\n", field); exit(1); } } void ocp_nlp_sqp_rti_work_get(void *config_, void *dims_, void *work_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_workspace *work = work_; if (!strcmp("nlp_work", field)) { void **value = return_value_; *value = work->nlp_work; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_rti_work_get\n", field); exit(1); } } void ocp_nlp_sqp_rti_config_initialize_default(void *config_) { ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_rti_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_rti_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_rti_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_rti_opts_update; config->opts_set = &ocp_nlp_sqp_rti_opts_set; config->opts_set_at_stage = &ocp_nlp_sqp_rti_opts_set_at_stage; config->memory_calculate_size = &ocp_nlp_sqp_rti_memory_calculate_size; config->memory_assign = &ocp_nlp_sqp_rti_memory_assign; 
config->workspace_calculate_size = &ocp_nlp_sqp_rti_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp_rti; config->eval_param_sens = &ocp_nlp_sqp_rti_eval_param_sens; config->config_initialize_default = &ocp_nlp_sqp_rti_config_initialize_default; config->precompute = &ocp_nlp_sqp_rti_precompute; config->get = &ocp_nlp_sqp_rti_get; config->opts_get = &ocp_nlp_sqp_rti_opts_get; config->work_get = &ocp_nlp_sqp_rti_work_get; return; }
/* ==== libimagequant.c ==== */
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <limits.h> #if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800) #error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher." #error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version." #endif #ifdef _OPENMP #include <omp.h> #define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */ #else #define LIQ_TEMP_ROW_WIDTH(img_width) (img_width) #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #include "nearest.h" #include "blur.h" #include "kmeans.h" #define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */ // each structure has a pointer as a unique identifier that allows type checking at run time static const char liq_attr_magic[] = "liq_attr"; static const char liq_image_magic[] = "liq_image"; static const char liq_result_magic[] = "liq_result"; static const char liq_histogram_magic[] = "liq_histogram"; static const char liq_remapping_result_magic[] = "liq_remapping_result"; static const char liq_freed_magic[] = "free"; #define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic) #define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr) struct liq_attr { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); double target_mse, max_mse, kmeans_iteration_limit; float min_opaque_val; unsigned int max_colors, 
max_histogram_entries; unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */; unsigned int kmeans_iterations, feedback_loop_trials; bool last_index_transparent, use_contrast_maps; unsigned char use_dither_map; unsigned char speed; unsigned char progress_stage1, progress_stage2, progress_stage3; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_log_callback_function *log_callback; void *log_callback_user_info; liq_log_flush_callback_function *log_flush_callback; void *log_flush_callback_user_info; }; struct liq_image { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); f_pixel *f_pixels; rgba_pixel **rows; double gamma; unsigned int width, height; unsigned char *importance_map, *edges, *dither_map; rgba_pixel *pixels, *temp_row; f_pixel *temp_f_row; liq_image_get_rgba_row_callback *row_callback; void *row_callback_user_info; liq_image *background; float min_opaque_val; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; bool free_pixels, free_rows, free_rows_internal; }; typedef struct liq_remapping_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); unsigned char *pixels; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; double gamma, palette_error; float dither_level; unsigned char use_dither_map; unsigned char progress_stage1; } liq_remapping_result; struct liq_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); liq_remapping_result *remapping; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; float dither_level; double gamma, palette_error; int min_posterization_output; unsigned char use_dither_map; }; struct liq_histogram { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); struct acolorhash_table *acht; 
double gamma; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; unsigned short ignorebits; bool had_image_added; }; static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL; static void contrast_maps(liq_image *image) LIQ_NONNULL; static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL; static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL; static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL; static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL; static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL; static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL; static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL; LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); LIQ_ARRAY(char, buf, required_space); va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context, buf, context->log_callback_user_info); } } LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg) { if (attr->log_callback) { attr->log_callback(attr, msg, attr->log_callback_user_info); } } LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr) { if (attr->log_flush_callback) { attr->log_flush_callback(attr, attr->log_flush_callback_user_info); } } LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent) { return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info); } LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent) { return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info); } #if USE_SSE inline static bool is_sse_available() { #if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64)) return true; #elif _MSC_VER int info[4]; __cpuid(info, 1); /* bool is implemented as a built-in type of size 1 in MSVC */ return info[3] & (1<<26) ? 
true : false; #else int a,b,c,d; cpuid(1, a, b, c, d); return d & (1<<25); // edx bit 25 is set when SSE is present #endif } #endif /* make it clear in backtrace when user-supplied handle points to invalid memory */ NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header); LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header) { if (!user_supplied_pointer) { return false; } if (user_supplied_pointer->magic_header == liq_freed_magic) { fprintf(stderr, "%s used after being freed", expected_magic_header); // this is not normal error handling, this is programmer error that should crash the program. // program cannot safely continue if memory has been used after it's been freed. // abort() is nasty, but security vulnerability may be worse. abort(); } return user_supplied_pointer->magic_header == expected_magic_header; } NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer); LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer) { if (!pointer) { return false; } // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not. // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read. 
char test_access = *((volatile char *)pointer); return test_access || true; } LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; liq_verbose_printf(attr, " error: %s", msg); } static double quality_to_mse(long quality) { if (quality == 0) { return MAX_DIFF; } if (quality == 100) { return 0; } // curve fudged to be roughly similar to quality of libjpeg // except lowest 10 for really low number of colors const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001); return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0; } static unsigned int mse_to_quality(double mse) { for(int i=100; i > 0; i--) { if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors return i; } } return 0; } /** internally MSE is a sum of all channels with pixels 0..1 range, but other software gives per-RGB-channel MSE for 0..255 range */ static double mse_to_standard_mse(double mse) { return mse * 65536.0/6.0; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE; attr->target_mse = quality_to_mse(target); attr->max_mse = quality_to_mse(minimum); return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->max_mse); } LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return mse_to_quality(attr->target_mse); } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE; attr->max_colors = colors; return LIQ_OK; } LIQ_EXPORT 
LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->max_colors; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE; attr->min_posterization_output = bits; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->min_posterization_output; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE; unsigned int iterations = MAX(8-speed, 0); iterations += iterations * iterations/2; attr->kmeans_iterations = iterations; attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed)); attr->feedback_loop_trials = MAX(56-9*speed, 0); attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed); attr->min_posterization_input = (speed >= 8) ? 1 : 0; attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping if (attr->use_dither_map && speed < 3) { attr->use_dither_map = 2; // always } attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map; attr->speed = speed; attr->progress_stage1 = attr->use_contrast_maps ? 
20 : 8; if (attr->feedback_loop_trials < 2) { attr->progress_stage1 += 30; } attr->progress_stage3 = 50 / (1+speed); attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return attr->speed; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } res->gamma = gamma; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE; attr->min_opaque_val = (double)min/255.0; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1; return MIN(255.f, 256.f * attr->min_opaque_val); } LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->last_index_transparent = !!is_last; } LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->progress_callback = callback; attr->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return; result->progress_callback = callback; result->progress_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) 
return; liq_verbose_printf_flush(attr); attr->log_callback = callback; attr->log_callback_user_info = user_info; } LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return; attr->log_flush_callback = callback; attr->log_flush_callback_user_info = user_info; } LIQ_EXPORT liq_attr* liq_attr_create() { return liq_attr_create_with_allocator(NULL, NULL); } LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return; } liq_verbose_printf_flush(attr); attr->magic_header = liq_freed_magic; attr->free(attr); } LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig) { if (!CHECK_STRUCT_TYPE(orig, liq_attr)) { return NULL; } liq_attr *attr = orig->malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = *orig; return attr; } static void *liq_aligned_malloc(size_t size) { unsigned char *ptr = malloc(size + 16); if (!ptr) { return NULL; } uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1] ptr += offset; assert(0 == (((uintptr_t)ptr) & 15)); ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free() return ptr; } LIQ_NONNULL static void liq_aligned_free(void *inptr) { unsigned char *ptr = inptr; size_t offset = ptr[-1] ^ 0x59; assert(offset > 0 && offset <= 16); free(ptr - offset); } LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*)) { #if USE_SSE if (!is_sse_available()) { return NULL; } #endif if (!custom_malloc && !custom_free) { custom_malloc = liq_aligned_malloc; custom_free = liq_aligned_free; } else if (!custom_malloc != !custom_free) { return NULL; // either specify both or none } liq_attr *attr = custom_malloc(sizeof(liq_attr)); if (!attr) return NULL; *attr = (liq_attr) { .magic_header = liq_attr_magic, .malloc = custom_malloc, .free = custom_free, .max_colors = 256, 
.min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha) .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles. .target_mse = 0, .max_mse = MAX_DIFF, }; liq_set_speed(attr, 4); return attr; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED; float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return LIQ_OK; } LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color) { if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED; hist->fixed_colors[hist->fixed_colors_count++] = color; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma) { if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER; float gamma_lut[256]; to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455); const f_pixel px = rgba_to_f(gamma_lut, (rgba_pixel){ .r = color.r, .g = color.g, .b = color.b, .a = color.a, }); return liq_histogram_add_fixed_color_f(hist, px); } LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img) { img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads()); return img->temp_f_row != NULL; } LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint) { return img->width * img->height > (low_memory_hint ? 
LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow } static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma) { if (gamma < 0 || gamma > 1.0) { liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)"); return NULL; } if (!rows && !row_callback) { liq_log_error(attr, "missing row data"); return NULL; } liq_image *img = attr->malloc(sizeof(liq_image)); if (!img) return NULL; *img = (liq_image){ .magic_header = liq_image_magic, .malloc = attr->malloc, .free = attr->free, .width = width, .height = height, .gamma = gamma ? gamma : 0.45455, .rows = rows, .row_callback = row_callback, .row_callback_user_info = row_callback_user_info, .min_opaque_val = attr->min_opaque_val, }; if (!rows || attr->min_opaque_val < 1.f) { img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads()); if (!img->temp_row) return NULL; } // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) { verbose_print(attr, " conserving memory"); if (!liq_image_use_low_memory(img)) return NULL; } if (img->min_opaque_val < 1.f) { verbose_print(attr, " Working around IE6 bug by making image less transparent..."); } return img; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) { return LIQ_VALUE_OUT_OF_RANGE; } if (ownership_flags & LIQ_OWN_ROWS) { if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE; img->free_rows = true; } if (ownership_flags & LIQ_OWN_PIXELS) { img->free_pixels = true; 
if (!img->pixels) { // for simplicity of this API there's no explicit bitmap argument, // so the row with the lowest address is assumed to be at the start of the bitmap img->pixels = img->rows[0]; for(unsigned int i=1; i < img->height; i++) { img->pixels = MIN(img->pixels, img->rows[i]); } } } return LIQ_OK; } LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image); LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image); LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER; const size_t required_size = img->width * img->height; if (buffer_size < required_size) { return LIQ_BUFFER_TOO_SMALL; } if (ownership == LIQ_COPY_PIXELS) { unsigned char *tmp = img->malloc(required_size); if (!tmp) { return LIQ_OUT_OF_MEMORY; } memcpy(tmp, importance_map, required_size); importance_map = tmp; } else if (ownership != LIQ_OWN_PIXELS) { return LIQ_UNSUPPORTED; } liq_image_free_importance_map(img); img->importance_map = importance_map; return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background) { if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER; if (background->background) { return LIQ_UNSUPPORTED; } if (img->width != background->width || img->height != background->height) { return LIQ_BUFFER_TOO_SMALL; } if (img->background) { liq_image_destroy(img->background); } img->background = background; liq_image_free_maps(img); // Force them to be re-analyzed with the background return LIQ_OK; } LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return false; } if (width <= 0 || 
height <= 0) { liq_log_error(attr, "width and height must be > 0"); return false; } if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) { liq_log_error(attr, "image too large"); return false; } return true; } LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma); } LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } for(int i=0; i < height; i++) { if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) { liq_log_error(attr, "invalid row pointers"); return NULL; } } return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma); } LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma) { if (!check_image_size(attr, width, height)) { return NULL; } if (!CHECK_USER_POINTER(bitmap)) { liq_log_error(attr, "invalid bitmap pointer"); return NULL; } rgba_pixel *const pixels = (rgba_pixel *const)bitmap; rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height); if (!rows) return NULL; for(int i=0; i < height; i++) { rows[i] = pixels + width * i; } liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma); if (!image) { attr->free(rows); return NULL; } image->free_rows = true; image->free_rows_internal = true; return image; } NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info); LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback 
*callback, liq_color *temp_row, int row, int width, void *user_info) { assert(callback); assert(temp_row); callback(temp_row, row, width, user_info); } LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img) { if (!CHECK_STRUCT_TYPE(img, liq_image)) { return false; } return img->rows || (img->temp_row && img->row_callback); } LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img) { assert(liq_image_has_rgba_pixels(img)); const bool iebug = img->min_opaque_val < 1.f; return (img->rows && !iebug); } LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row) { if (liq_image_can_use_rgba_rows(img)) { return img->rows[row]; } assert(img->temp_row); rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num(); if (img->rows) { memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0])); } else { liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info); } if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row); return temp_row; } LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[]) { assert(row_f_pixels); assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15)); const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row); for(unsigned int col=0; col < img->width; col++) { row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]); } } LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img) { assert(omp_get_thread_num() == 0); if (img->f_pixels) { return true; } if (!liq_image_should_use_low_memory(img, false)) { img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height); } if (!img->f_pixels) { return liq_image_use_low_memory(img); } if (!liq_image_has_rgba_pixels(img)) { return false; } float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); for(unsigned int i=0; i < 
img->height; i++) { convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut); } return true; } LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row) { if (!img->f_pixels) { assert(img->temp_f_row); // init should have done that float gamma_lut[256]; to_f_set_gamma(gamma_lut, img->gamma); f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num(); convert_row_to_f(img, row_for_thread, row, gamma_lut); return row_for_thread; } return img->f_pixels + img->width * row; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->width; } LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1; return input_image->height; } typedef void free_func(void*); LIQ_NONNULL static free_func *get_default_free_func(liq_image *img) { // When default allocator is used then user-supplied pointers must be freed with free() if (img->free_rows_internal || img->free != liq_aligned_free) { return img->free; } return free; } LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image) { if (input_image->free_pixels && input_image->pixels) { get_default_free_func(input_image)(input_image->pixels); input_image->pixels = NULL; } if (input_image->free_rows && input_image->rows) { get_default_free_func(input_image)(input_image->rows); input_image->rows = NULL; } } LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image) { if (input_image->importance_map) { input_image->free(input_image->importance_map); input_image->importance_map = NULL; } } LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image) { liq_image_free_importance_map(input_image); if (input_image->edges) { input_image->free(input_image->edges); input_image->edges = NULL; } if (input_image->dither_map) { 
input_image->free(input_image->dither_map); input_image->dither_map = NULL; } } LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image) { if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return; liq_image_free_rgba_source(input_image); liq_image_free_maps(input_image); if (input_image->f_pixels) { input_image->free(input_image->f_pixels); } if (input_image->temp_row) { input_image->free(input_image->temp_row); } if (input_image->temp_f_row) { input_image->free(input_image->temp_f_row); } if (input_image->background) { liq_image_destroy(input_image->background); } input_image->magic_header = liq_freed_magic; input_image->free(input_image); } LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) { return NULL; } liq_histogram *hist = attr->malloc(sizeof(liq_histogram)); if (!hist) return NULL; *hist = (liq_histogram) { .magic_header = liq_histogram_magic, .malloc = attr->malloc, .free = attr->free, .ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input), }; return hist; } LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist) { if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return; hist->magic_header = liq_freed_magic; pam_freeacolorhash(hist->acht); hist->free(hist); } LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img) { liq_result *res; if (LIQ_OK != liq_image_quantize(img, attr, &res)) { return NULL; } return res; } LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output) { if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!liq_image_has_rgba_pixels(img)) { return LIQ_UNSUPPORTED; } liq_histogram *hist = liq_histogram_create(attr); if (!hist) { return LIQ_OUT_OF_MEMORY; } liq_error err = liq_histogram_add_image(hist, attr, img); if (LIQ_OK != err) { return err; } err = liq_histogram_quantize_internal(hist, attr, false, result_output); 
liq_histogram_destroy(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) { return liq_histogram_quantize_internal(input_hist, attr, true, result_output); } LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) { if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER; *result_output = NULL; if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (liq_progress(attr, 0)) return LIQ_ABORTED; histogram *hist; liq_error err = finalize_histogram(input_hist, attr, &hist); if (err != LIQ_OK) { return err; } err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output); pam_freeacolorhist(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } if (res->dither_level < 0 || res->dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE; res->dither_level = dither_level; return LIQ_OK; } LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) { return NULL; } liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result)); if (!res) return NULL; *res = (liq_remapping_result) { .magic_header = liq_remapping_result_magic, .malloc = result->malloc, .free = result->free, .dither_level = result->dither_level, .use_dither_map = result->use_dither_map, .palette_error = result->palette_error, .gamma = result->gamma, .palette = pam_duplicate_colormap(result->palette), .progress_callback = 
result->progress_callback, .progress_callback_user_info = result->progress_callback_user_info, .progress_stage1 = result->use_dither_map ? 20 : 0, }; return res; } LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; return result->gamma; } LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return; if (result->palette) pam_freecolormap(result->palette); if (result->pixels) result->free(result->pixels); result->magic_header = liq_freed_magic; result->free(result); } LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return; memset(&res->int_palette, 0, sizeof(liq_palette)); if (res->remapping) { memset(&res->remapping->int_palette, 0, sizeof(liq_palette)); liq_remapping_result_destroy(res->remapping); } pam_freecolormap(res->palette); res->magic_header = liq_freed_magic; res->free(res); } LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->palette_error >= 0) { return mse_to_standard_mse(result->palette_error); } return -1; } LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->remapping && result->remapping->palette_error >= 0) { return mse_to_standard_mse(result->remapping->palette_error); } return -1; } LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->palette_error >= 0) { return mse_to_quality(result->palette_error); } return -1; } LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1; if (result->remapping && result->remapping->palette_error >= 0) { return 
mse_to_quality(result->remapping->palette_error); } return -1; }

/* qsort comparator: orders colormap entries by descending popularity.
   NOTE(review): returns 1 for equal keys in both comparison directions, so
   ordering of ties is not consistent — tolerated by common qsort
   implementations, but the resulting tie order is unspecified. */
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    return v1 > v2 ? -1 : 1;
}

/* Sorts nelem palette entries, starting at index start, by popularity. */
LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
    if (!nelem) return;
    qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}

/* Swaps two palette entries in place. */
#define SWAP_PALETTE(map, a,b) { \
    const colormap_item tmp = (map)->palette[(a)]; \
    (map)->palette[(a)] = (map)->palette[(b)]; \
    (map)->palette[(b)] = tmp; }

LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
    /*
    ** Step 3.5 [GRR]: remap the palette colors so that all entries with
    ** the maximal alpha value (i.e., fully opaque) are at the end and can
    ** therefore be omitted from the tRNS chunk.
    */
    if (options->last_index_transparent) {
        // Special mode: move the (single) transparent color to the last index
        // and sort everything before it; nothing else to do afterwards.
        for(unsigned int i=0; i < map->colors; i++) {
            if (map->palette[i].acolor.a < 1.f/256.f) {
                const unsigned int old = i, transparent_dest = map->colors-1;
                SWAP_PALETTE(map, transparent_dest, old);
                /* colors sorted by popularity make pngs slightly more compressible */
                sort_palette_qsort(map, 0, map->colors-1);
                return;
            }
        }
    }

    // Fixed (user-supplied) colors sit at the end of the palette; only the
    // leading run of non-fixed entries may be reordered below.
    unsigned int non_fixed_colors = 0;
    for(unsigned int i = 0; i < map->colors; i++) {
        if (map->palette[i].fixed) {
            break;
        }
        non_fixed_colors++;
    }

    /* move transparent colors to the beginning to shrink trns chunk */
    unsigned int num_transparent = 0;
    for(unsigned int i = 0; i < non_fixed_colors; i++) {
        if (map->palette[i].acolor.a < 255.f/256.f) {
            // current transparent color is swapped with earlier opaque one
            if (i != num_transparent) {
                SWAP_PALETTE(map, num_transparent, i);
                i--; // re-examine the entry just swapped into position i
            }
            num_transparent++;
        }
    }

    liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)?
"y" : "ies"); /* colors sorted by popularity make pngs slightly more compressible * opaque and transparent are sorted separately */ sort_palette_qsort(map, 0, num_transparent); sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent); if (non_fixed_colors > 9 && map->colors > 16) { SWAP_PALETTE(map, 7, 1); // slightly improves compression SWAP_PALETTE(map, 8, 2); SWAP_PALETTE(map, 9, 3); } } inline static unsigned int posterize_channel(unsigned int color, unsigned int bits) { return (color & ~((1<<bits)-1)) | (color >> (8-bits)); } LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize) { float gamma_lut[256]; to_f_set_gamma(gamma_lut, gamma); dest->count = map->colors; for(unsigned int x = 0; x < map->colors; ++x) { rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor); px.r = posterize_channel(px.r, posterize); px.g = posterize_channel(px.g, posterize); px.b = posterize_channel(px.b, posterize); px.a = posterize_channel(px.a, posterize); map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */ if (!px.a && !map->palette[x].fixed) { px.r = 71; px.g = 112; px.b = 76; } dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a}; } } LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL; if (result->remapping && result->remapping->int_palette.count) { return &result->remapping->int_palette; } if (!result->int_palette.count) { set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output); } return &result->int_palette; } LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map) { const int rows = input_image->height; const unsigned int cols = input_image->width; double 
remapping_error=0; if (!liq_image_get_row_f_init(input_image)) { return -1; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return -1; } const colormap_item *acolormap = map->palette; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; const unsigned int max_threads = omp_get_max_threads(); LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads); kmeans_init(map, max_threads, average_color); #if __GNUC__ >= 9 #pragma omp parallel for if (rows*cols > 3000) \ schedule(static) default(none) shared(acolormap,average_color,cols,input_image,map,n,output_pixels,rows,transparent_index) reduction(+:remapping_error) #else #pragma omp parallel for if (rows*cols > 3000) \ schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error) #endif for(int row = 0; row < rows; ++row) { const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? 
liq_image_get_row_f(input_image->background, row) : NULL; unsigned int last_match=0; for(unsigned int col = 0; col < cols; ++col) { float diff; last_match = nearest_search(n, &row_pixels[col], last_match, &diff); if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) { last_match = transparent_index; } output_pixels[row][col] = last_match; remapping_error += diff; kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color); } } kmeans_finalize(map, max_threads, average_color); nearest_free(n); return remapping_error / (input_image->width * input_image->height); } inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px) { /* Use Floyd-Steinberg errors to adjust actual color. */ const float sr = thiserr.r * dither_level, sg = thiserr.g * dither_level, sb = thiserr.b * dither_level, sa = thiserr.a * dither_level; float ratio = 1.0; const float max_overflow = 1.1f; const float max_underflow = -0.1f; // allowing some overflow prevents undithered bands caused by clamping of all channels if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr); else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); } if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg); else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); } if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb); else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); } float a = px.a + sa; if (a > 1.f) { a = 1.f; } else if (a < 0) { a = 0; } // If dithering error is crazy high, don't propagate it that much // This prevents crazy geen pixels popping out of the blue (or red or black! 
;) const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa; if (dither_error > max_dither_error) { ratio *= 0.8f; } else if (dither_error < 2.f/256.f/256.f) { // don't dither areas that don't have noticeable error — makes file smaller return px; } return (f_pixel) { .r=px.r + sr * ratio, .g=px.g + sg * ratio, .b=px.b + sb * ratio, .a=a, }; } /** Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered. If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image. */ LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped) { const int rows = input_image->height, cols = input_image->width; const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL; const colormap *map = quant->palette; const colormap_item *acolormap = map->palette; if (!liq_image_get_row_f_init(input_image)) { return false; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return false; } /* Initialize Floyd-Steinberg error vectors. */ const size_t errwidth = cols+2; f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access if (!thiserr) return false; f_pixel *restrict nexterr = thiserr + errwidth; memset(thiserr, 0, errwidth * sizeof(thiserr[0])); bool ok = true; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? 
nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; // response to this value is non-linear and without it any value < 0.8 would give almost no dithering float base_dithering_level = quant->dither_level; base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level); if (dither_map) { base_dithering_level *= 1.f/255.f; // convert byte to float } base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating int fs_direction = 1; unsigned int last_match=0; for (int row = 0; row < rows; ++row) { if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) { ok = false; break; } memset(nexterr, 0, errwidth * sizeof(nexterr[0])); int col = (fs_direction > 0) ? 0 : (cols - 1); const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL; do { float dither_level = base_dithering_level; if (dither_map) { dither_level *= dither_map[row*cols + col]; } const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]); const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match; float diff; last_match = nearest_search(n, &spx, guessed_match, &diff); f_pixel output_px = acolormap[last_match].acolor; if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) { output_px = bg_pixels[col]; output_pixels[row][col] = transparent_index; } else { output_pixels[row][col] = last_match; } f_pixel err = { .r = (spx.r - output_px.r), .g = (spx.g - output_px.g), .b = (spx.b - output_px.b), .a = (spx.a - output_px.a), }; // If dithering error is crazy high, don't propagate it that much // This prevents crazy geen pixels popping out of the blue (or red or black! 
;) if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) { err.r *= 0.75f; err.g *= 0.75f; err.b *= 0.75f; err.a *= 0.75f; } /* Propagate Floyd-Steinberg error terms. */ if (fs_direction > 0) { thiserr[col + 2].a += err.a * (7.f/16.f); thiserr[col + 2].r += err.r * (7.f/16.f); thiserr[col + 2].g += err.g * (7.f/16.f); thiserr[col + 2].b += err.b * (7.f/16.f); nexterr[col + 2].a = err.a * (1.f/16.f); nexterr[col + 2].r = err.r * (1.f/16.f); nexterr[col + 2].g = err.g * (1.f/16.f); nexterr[col + 2].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col ].a += err.a * (3.f/16.f); nexterr[col ].r += err.r * (3.f/16.f); nexterr[col ].g += err.g * (3.f/16.f); nexterr[col ].b += err.b * (3.f/16.f); } else { thiserr[col ].a += err.a * (7.f/16.f); thiserr[col ].r += err.r * (7.f/16.f); thiserr[col ].g += err.g * (7.f/16.f); thiserr[col ].b += err.b * (7.f/16.f); nexterr[col ].a = err.a * (1.f/16.f); nexterr[col ].r = err.r * (1.f/16.f); nexterr[col ].g = err.g * (1.f/16.f); nexterr[col ].b = err.b * (1.f/16.f); nexterr[col + 1].a += err.a * (5.f/16.f); nexterr[col + 1].r += err.r * (5.f/16.f); nexterr[col + 1].g += err.g * (5.f/16.f); nexterr[col + 1].b += err.b * (5.f/16.f); nexterr[col + 2].a += err.a * (3.f/16.f); nexterr[col + 2].r += err.r * (3.f/16.f); nexterr[col + 2].g += err.g * (3.f/16.f); nexterr[col + 2].b += err.b * (3.f/16.f); } // remapping is done in zig-zag col += fs_direction; if (fs_direction > 0) { if (col >= cols) break; } else { if (col < 0) break; } } while(1); f_pixel *const temperr = thiserr; thiserr = nexterr; nexterr = temperr; fs_direction = -fs_direction; } input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped nearest_free(n); return ok; } /* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from 
histogram */ LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse) { const float max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f); if (fixed_colors_count) { for(int j=0; j < hist->size; j++) { for(unsigned int i=0; i < fixed_colors_count; i++) { if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) { hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry j--; break; // continue searching histogram } } } } } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma) { if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER; if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE; if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE; if (input_hist->ignorebits > 0 && input_hist->had_image_added) { return LIQ_UNSUPPORTED; } input_hist->ignorebits = 0; input_hist->had_image_added = true; input_hist->gamma = gamma ? gamma : 0.45455; if (!input_hist->acht) { input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free); if (!input_hist->acht) { return LIQ_OUT_OF_MEMORY; } } // Fake image size. It's only for hash size estimates. 
if (!input_hist->acht->cols) { input_hist->acht->cols = num_entries; } input_hist->acht->rows += num_entries; const unsigned int hash_size = input_hist->acht->hash_size; for(int i=0; i < num_entries; i++) { const rgba_pixel rgba = { .r = entries[i].color.r, .g = entries[i].color.g, .b = entries[i].color.b, .a = entries[i].color.a, }; union rgba_as_int px = {rgba}; unsigned int hash; if (px.rgba.a) { hash = px.l % hash_size; } else { hash=0; px.l=0; } if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) { return LIQ_OUT_OF_MEMORY; } } return LIQ_OK; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image) { if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER; const unsigned int cols = input_image->width, rows = input_image->height; if (!input_image->importance_map && options->use_contrast_maps) { contrast_maps(input_image); } input_hist->gamma = input_image->gamma; for(int i = 0; i < input_image->fixed_colors_count; i++) { liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]); if (res != LIQ_OK) { return res; } } /* ** Step 2: attempt to make a histogram of the colors, unclustered. ** If at first we don't succeed, increase ignorebits to increase color ** coherence and try again. */ if (liq_progress(options, options->progress_stage1 * 0.4f)) { return LIQ_ABORTED; } const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image); // Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not // the first image added const unsigned int max_histogram_entries = input_hist->had_image_added ? 
~0 : options->max_histogram_entries; do { if (!input_hist->acht) { input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free); } if (!input_hist->acht) return LIQ_OUT_OF_MEMORY; // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important. // noise map does not include edges to avoid ruining anti-aliasing for(unsigned int row=0; row < rows; row++) { bool added_ok; if (all_rows_at_once) { added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map); if (added_ok) break; } else { const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) }; added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL); } if (!added_ok) { input_hist->ignorebits++; liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... 
%d", input_hist->ignorebits);
                pam_freeacolorhash(input_hist->acht);
                input_hist->acht = NULL;
                if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
                break; // restart the do/while with a coarser hash (higher ignorebits)
            }
        }
    } while(!input_hist->acht);
    input_hist->had_image_added = true;

    liq_image_free_importance_map(input_image);

    if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now can free the RGBA source if copy has been made in f_pixels
    }

    return LIQ_OK;
}

/* Converts the accumulated color hash into a flat histogram array, consuming
   (freeing) the hash. Returns LIQ_BITMAP_NOT_AVAILABLE when no image or colors
   were ever added. Colors close to user-supplied fixed colors are removed,
   since fixed colors are added to the palette separately. */
LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
    if (liq_progress(options, options->progress_stage1 * 0.9f)) {
        return LIQ_ABORTED;
    }

    if (!input_hist->acht) {
        return LIQ_BITMAP_NOT_AVAILABLE;
    }

    histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
    pam_freeacolorhash(input_hist->acht);
    input_hist->acht = NULL;
    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }

    liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
    remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);

    *hist_output = hist;
    return LIQ_OK;
}

/* Workaround for IE6: raises the alpha of almost-opaque pixels in one RGBA row
   so the browser's all-or-nothing transparency doesn't drop them entirely. */
LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even slightest transparency completely transparent,
       thus to improve situation in IE, make colors that are less than ~10%
       transparent completely opaque */

    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;

    for(unsigned int col = 0; col < input_image->width; col++) {
        const rgba_pixel px = row_pixels[col];

        /* ie bug: to avoid visible step caused by forced opaqueness,
           linearly raise opaqueness of almost-opaque colors */
        if (px.a >= almost_opaque_val_int) {
            float al = px.a / 255.f;
            al = almost_opaque_val + (al-almost_opaque_val) *
(1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val); al *= 256.f; row_pixels[col].a = al >= 255.f ? 255 : al; } } } /** Builds two maps: importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy. edges - noise map including all edges */ LIQ_NONNULL static void contrast_maps(liq_image *image) { const unsigned int cols = image->width, rows = image->height; if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) { return; } unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows); image->importance_map = NULL; unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows); image->edges = NULL; unsigned char *restrict tmp = image->malloc(cols*rows); if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) { image->free(noise); image->free(edges); image->free(tmp); return; } const f_pixel *curr_row, *prev_row, *next_row; curr_row = prev_row = next_row = liq_image_get_row_f(image, 0); for (unsigned int j=0; j < rows; j++) { prev_row = curr_row; curr_row = next_row; next_row = liq_image_get_row_f(image, MIN(rows-1,j+1)); f_pixel prev, curr = curr_row[0], next=curr; for (unsigned int i=0; i < cols; i++) { prev=curr; curr=next; next = curr_row[MIN(cols-1,i+1)]; // contrast is difference between pixels neighbouring horizontally and vertically const float a = fabsf(prev.a+next.a - curr.a*2.f), r = fabsf(prev.r+next.r - curr.r*2.f), g = fabsf(prev.g+next.g - curr.g*2.f), b = fabsf(prev.b+next.b - curr.b*2.f); const f_pixel prevl = prev_row[i]; const f_pixel nextl = next_row[i]; const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f), r1 = fabsf(prevl.r+nextl.r - curr.r*2.f), g1 = fabsf(prevl.g+nextl.g - curr.g*2.f), b1 = fabsf(prevl.b+nextl.b - curr.b*2.f); const float horiz = MAX(MAX(a,r),MAX(g,b)); const float vert = MAX(MAX(a1,r1),MAX(g1,b1)); const float edge = MAX(horiz,vert); float z = edge - fabsf(horiz-vert)*.5f; z = 1.f - 
MAX(z,MIN(horiz,vert));
            z *= z; // noise is amplified
            z *= z;

            // 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely).
            const unsigned int z_int = 85 + (unsigned int)(z * 171.f);
            noise[j*cols+i] = MIN(z_int, 255);
            const int e_int = 255 - (int)(edge * 256.f);
            edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0;
        }
    }

    // noise areas are shrunk and then expanded to remove thin edges from the map
    liq_max3(noise, tmp, cols, rows);
    liq_max3(tmp, noise, cols, rows);

    liq_blur(noise, tmp, noise, cols, rows, 3);

    liq_max3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);

    liq_min3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);

    liq_min3(edges, tmp, cols, rows);
    liq_max3(tmp, edges, cols, rows);
    // Final edge map is limited by the noise map at every pixel.
    for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);

    image->free(tmp);

    // Hand ownership of the two maps back to the image.
    image->importance_map = noise;
    image->edges = edges;
}

/**
 * Builds map of neighbor pixels mapped to the same palette entry
 *
 * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
 * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
 * Correct flood fill doesn't have visually good properties.
 */
LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map)
{
    const unsigned int width = input_image->width;
    const unsigned int height = input_image->height;
    unsigned char *const edges = input_image->edges;

    for(unsigned int row=0; row < height; row++) {
        unsigned char lastpixel = row_pointers[row][0];
        unsigned int lastcol=0;

        for(unsigned int col=1; col < width; col++) {
            const unsigned char px = row_pointers[row][col];

            if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) {
                // Transparency may or may not create an edge. When there's an explicit background set, assume no edge.
                continue;
            }

            // End of a run of identical palette indices (or end of the row):
            // score the run by how many of its pixels match vertically.
            if (px != lastpixel || col == width-1) {
                int neighbor_count = 10 * (col-lastcol);

                unsigned int i=lastcol;
                while(i < col) {
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 15;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 15;
                    }
                    i++;
                }

                // Fold the neighbor score into the edge map; long uniform runs
                // (high neighbor_count) reduce the dither strength there.
                while(lastcol <= col) {
                    int e = edges[row*width + lastcol];
                    edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
                }
                lastpixel = px;
            }
        }
    }
    // The edges buffer is repurposed as the dither map from now on.
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}

/**
 * Palette can be NULL, in which case it creates a new palette from scratch.
 *
 * Consumes (frees) the input palette and returns a new one with the fixed
 * colors appended; existing entries are dropped if fixed colors would
 * overflow max_colors.
 */
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
    if (!fixed_colors_count) return palette;

    colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);
    unsigned int i=0;
    if (palette && fixed_colors_count < max_colors) {
        unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
        for(; i < palette_max; i++) {
            newpal->palette[i] = palette->palette[i];
        }
    }
    for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
        newpal->palette[i++] = (colormap_item){
            .acolor = fixed_colors[j],
            .fixed = true,
        };
    }
    if (palette) pam_freecolormap(palette);
    return newpal;
}

// Histogram-weight adjustment used between feedback-loop iterations:
// boosts weight of entries with large remapping error `diff`.
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
    item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff));
}

/**
 Repeats mediancut with different histogram weights to find palette with minimum error.

 feedback_loop_trials controls how long the search will take.

 * = option is exposed in options

 < 0 skips the iteration.
 */
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
    unsigned int max_colors = options->max_colors;

    // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
    // at this point actual gamma is not set, so very conservative posterization estimate is used
    const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));
    int feedback_loop_trials = options->feedback_loop_trials;
    // Progressively cut the number of trials for large histograms (3/4 per threshold).
    if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    colormap *acolormap = NULL;
    double least_error = MAX_DIFF;
    double target_mse_overshoot = feedback_loop_trials>0 ? 1.05 : 1.0;
    const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);

    do {
        colormap *newmap;
        if (hist->size && fixed_colors_count < max_colors) {
            newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2, options->malloc, options->free);
        } else {
            // Nothing to quantize (empty histogram or all slots fixed): bail out after the fixed colors are added.
            feedback_loop_trials = 0;
            newmap = NULL;
        }
        newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        if (!newmap) {
            return NULL;
        }

        if (feedback_loop_trials <= 0) {
            return newmap;
        }

        // after palette has been created, total error (MSE) is calculated to keep the best palette
        // at the same time K-Means iteration is done to improve the palette
        // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors

        const bool first_run_of_target_mse = !acolormap && target_mse > 0;
        double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ? NULL : adjust_histogram_callback);

        // goal is to increase quality or to reduce number of colors used if quality is good enough
        if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
            if (acolormap) pam_freecolormap(acolormap);
            acolormap = newmap;

            if (total_error < target_mse && total_error > 0) {
                // K-Means iteration improves quality above what mediancut aims for
                // this compensates for it, making mediancut aim for worse
                target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
            }

            least_error = total_error;

            // if number of colors could be reduced, try to keep it that way
            // but allow extra color as a bit of wiggle room in case quality can be improved too
            max_colors = MIN(newmap->colors+1, max_colors);

            feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
        } else {
            // Worse palette: soften the adjusted weights back toward the
            // perceptual weights and burn through trials faster.
            for(unsigned int j=0; j < hist->size; j++) {
                hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
            }

            target_mse_overshoot = 1.0;
            feedback_loop_trials -= 6;
            // if error is really bad, it's unlikely to improve, so end sooner
            if (total_error > least_error*4) feedback_loop_trials -= 3;
            pam_freecolormap(newmap);
        }

        float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
        if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
        liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done));
    } while(feedback_loop_trials > 0);

    *palette_error_p = least_error;
    return acolormap;
}

// Trivial conversion: one palette entry per histogram entry (used when the
// image already has few enough colors). Returns NULL for an empty histogram.
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options) {
    if (!hist->size) {
        return NULL;
    }
    colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free);
    for(unsigned int i=0; i < hist->size; i++) {
        acolormap->palette[i].acolor = hist->achv[i].acolor;
        acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
    }
    return acolormap;
}

LIQ_NONNULL static
liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **result_output)
{
    // Top-level quantization driver: picks a palette (directly or via the
    // mediancut feedback loop), refines it with K-Means, enforces the
    // quality limit, and packages everything into a liq_result.
    colormap *acolormap;
    double palette_error = -1;

    assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));

    const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;

    if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;

    // If image has few colors to begin with (and no quality degradation is required)
    // then it's possible to skip quantization entirely
    if (few_input_colors && options->target_mse == 0) {
        acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        palette_error = 0;
    } else {
        const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
        acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
        if (!acolormap) {
            return LIQ_VALUE_OUT_OF_RANGE;
        }

        // K-Means iteration approaches local minimum for the palette
        double iteration_limit = options->kmeans_iteration_limit;
        unsigned int iterations = options->kmeans_iterations;

        if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work

        if (iterations) {
            // likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
            if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
                if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
                    hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
                }
            }

            // Same large-histogram throttling as in find_best_palette.
            if (hist->size > 5000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;}

            verbose_print(options, " moving colormap towards local minimum");

            double previous_palette_error = MAX_DIFF;

            for(unsigned int i=0; i < iterations; i++) {
                palette_error = kmeans_do_iteration(hist, acolormap, NULL);

                if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
                    break;
                }

                // Converged: error stopped changing meaningfully.
                if (fabs(previous_palette_error-palette_error) < iteration_limit) {
                    break;
                }

                if (palette_error > max_mse*1.5) { // probably hopeless
                    if (palette_error > max_mse*3.0) break; // definitely hopeless
                    i++;
                }

                previous_palette_error = palette_error;
            }
        }

        if (palette_error > max_mse) {
            liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)", mse_to_standard_mse(palette_error), mse_to_quality(palette_error), mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return LIQ_QUALITY_TOO_LOW;
        }
    }

    if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
        pam_freecolormap(acolormap);
        return LIQ_ABORTED;
    }

    sort_palette(acolormap, options);

    // If palette was created from a multi-image histogram,
    // then it shouldn't be optimized for one image during remapping
    if (fixed_result_colors) {
        for(unsigned int i=0; i < acolormap->colors; i++) {
            acolormap->palette[i].fixed = true;
        }
    }

    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) return LIQ_OUT_OF_MEMORY;
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .use_dither_map = options->use_dither_map,
        .gamma = gamma,
        .min_posterization_output = options->min_posterization_output,
    };

    *result_output = result;
    return LIQ_OK;
}

/**
 * Public API: remaps the image into `buffer` (one palette index per pixel,
 * row-major). Validates all pointers/magic headers and the buffer size,
 * then builds a per-row pointer table and delegates to
 * liq_write_remapped_image_rows().
 */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    // One byte (palette index) per pixel.
    const size_t required_size = input_image->width * input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    LIQ_ARRAY(unsigned char *, rows, input_image->height);
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}

/**
 * Public API: remaps the image into caller-provided row buffers, with or
 * without Floyd-Steinberg dithering depending on result->dither_level.
 * Any previous remapping result on `quant` is destroyed and replaced.
 */
LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }
    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    // Lazily build edge/importance maps if dither-map-driven dithering is enabled.
    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
        return LIQ_ABORTED;
    }

    /*
     ** Step 4: map the colors in the image to their closest match in the
     ** new colormap, and write 'em out.
     */

    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
    } else {
        const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000;
        const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map);
        const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
            update_dither_map(input_image, row_pointers, result->palette);
        }

        if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
            return LIQ_ABORTED;
        }

        // remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);

        if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
            return LIQ_ABORTED;
        }
    }

    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }

    return LIQ_OK;
}

// Returns the compile-time library version number.
LIQ_EXPORT int liq_version() {
    return LIQ_VERSION;
}
/* ==================== embedded file: tinyexr.h ==================== */
/* Copyright (c) 2014 - 2017, Syoyo Fujita All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

// Error codes returned by the TinyEXR API (0 = success, negative = failure).
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
// NOTE(review): same value (-5) as TINYEXR_ERROR_INVALID_FILE above, so the
// two cases are indistinguishable to callers; renumbering would change the
// ABI — confirm against upstream before fixing.
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

// Parsed contents of the EXR version/flags header block.
typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

// One custom (non-required) header attribute; `value` is an owned byte blob.
typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)

} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;

} EXRMultiPartHeader;

typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;

} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;

} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);

// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
                                     const unsigned char *memory, size_t size);

// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
                                  const char *filename, const char **err);

// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
                                    const EXRVersion *version,
                                    const unsigned char *memory, size_t size,
                                    const char **err);

// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
                                           int *num_headers,
                                           const EXRVersion *version,
                                           const char *filename,
                                           const char **err);

// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
                                             int *num_headers,
                                             const EXRVersion *version,
                                             const unsigned char *memory,
                                             size_t size, const char **err);

// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
                                const char *filename, const char **err);

// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
                                  const unsigned char *memory,
                                  const size_t size, const char **err);

// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
                                         const EXRHeader **headers,
                                         unsigned int num_parts,
                                         const char *filename,
                                         const char **err);

// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
                                           const EXRHeader **headers,
                                           unsigned int num_parts,
                                           const unsigned char *memory,
                                           const size_t size,
                                           const char **err);

// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXRImageToFile(const EXRImage *image,
                              const EXRHeader *exr_header,
                              const char *filename, const char **err);

// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Return the number of bytes if success.
// Returns negative value and may set error string in `err` when there's an
// error
extern size_t SaveEXRImageToMemory(const EXRImage *image,
                                   const EXRHeader *exr_header,
                                   unsigned char **memory, const char **err);

// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
//                                 const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
// NOTE(review): "DEIFNED" is a typo for "DEFINED", but the misspelling is
// used consistently as the guard macro pair; renaming is safe only if no
// external code tests this macro.
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>

#include <string>
#include <vector>

#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif  // __cplusplus > 199711L

#ifdef _OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). 
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. 
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. 
Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. 
No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). 
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. 
Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). */ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. 
#define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. #define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. //#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. 
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. void mz_free(void *p); #define MZ_ADLER32_INIT (1) // mz_adler32() returns the initial adler-32 value to use when called with // ptr==NULL. mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len); #define MZ_CRC32_INIT (0) // mz_crc32() returns the initial CRC-32 value to use when called with // ptr==NULL. mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len); // Compression strategies. enum { MZ_DEFAULT_STRATEGY = 0, MZ_FILTERED = 1, MZ_HUFFMAN_ONLY = 2, MZ_RLE = 3, MZ_FIXED = 4 }; // Method #define MZ_DEFLATED 8 #ifndef MINIZ_NO_ZLIB_APIS // Heap allocation callbacks. // Note that mz_alloc_func parameter types purpsosely differ from zlib's: // items/size is size_t, not unsigned long. 
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size); typedef void (*mz_free_func)(void *opaque, void *address); typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size); #define MZ_VERSION "9.1.15" #define MZ_VERNUM 0x91F0 #define MZ_VER_MAJOR 9 #define MZ_VER_MINOR 1 #define MZ_VER_REVISION 15 #define MZ_VER_SUBREVISION 0 // Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The // other values are for advanced use (refer to the zlib docs). enum { MZ_NO_FLUSH = 0, MZ_PARTIAL_FLUSH = 1, MZ_SYNC_FLUSH = 2, MZ_FULL_FLUSH = 3, MZ_FINISH = 4, MZ_BLOCK = 5 }; // Return status codes. MZ_PARAM_ERROR is non-standard. enum { MZ_OK = 0, MZ_STREAM_END = 1, MZ_NEED_DICT = 2, MZ_ERRNO = -1, MZ_STREAM_ERROR = -2, MZ_DATA_ERROR = -3, MZ_MEM_ERROR = -4, MZ_BUF_ERROR = -5, MZ_VERSION_ERROR = -6, MZ_PARAM_ERROR = -10000 }; // Compression levels: 0-9 are the standard zlib-style levels, 10 is best // possible compression (not zlib compatible, and may be very slow), // MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. 
typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. 
int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). 
// MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size); // Attempts to locates a file in the archive's central directory. // Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH // Returns -1 if the file cannot be found. int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); // Extracts a archive file to a memory buffer using no memory allocation. mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size); // Extracts a archive file to a memory buffer. mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags); // Extracts a archive file to a dynamically allocated heap buffer. void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags); void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags); // Extracts a archive file using a callback function to output the file's data. mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags); #ifndef MINIZ_NO_STDIO // Extracts a archive file to a disk file and sets its last accessed and // modified times. 
// This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. 
These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. 
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
// Internal tinfl limits: table/symbol counts per the DEFLATE format, plus the
// size of the fast Huffman lookup table (indexed by the low
// TINFL_FAST_LOOKUP_BITS bits of the bit buffer).
enum {
  TINFL_MAX_HUFF_TABLES = 3,
  TINFL_MAX_HUFF_SYMBOLS_0 = 288,
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,
  TINFL_FAST_LOOKUP_BITS = 10,
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
// One decoded Huffman table: per-symbol code sizes, a direct lookup table for
// short codes, and a binary tree (m_tree) for codes longer than
// TINFL_FAST_LOOKUP_BITS.
typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
// Bit buffer type: 64-bit when the target has native 64-bit registers,
// otherwise 32-bit.
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Complete decompressor state. m_state is the coroutine resume point used by
// the TINFL_CR_* macros; the rest is DEFLATE parsing state (bit buffer, block
// header fields, Huffman tables, and scratch for code-length decoding).
struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
  TDEFL_HUFFMAN_ONLY = 0,
  TDEFL_DEFAULT_MAX_PROBES = 128,
  TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. 
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. 
// Complete compressor state. Grouped roughly as: output sink (callback +
// user pointer), configuration (m_flags, probe counts, greedy/lazy parsing),
// LZ dictionary bookkeeping, bit-packing state, saved match state for lazy
// parsing, stream-resume state (in/out buffer pointers for tdefl_compress),
// and the large fixed buffers (dictionary, Huffman tables, LZ code buffer,
// hash chains, output buffer). No heap allocation is ever performed.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data is supplied to this callback. In
// this case, the user should call the tdefl_compress_buffer() API for
// compression (it requires a non-NULL callback; see its declaration below).
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API instead, which writes into a caller-supplied output buffer.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a // non-NULL tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't // defined, because it uses some of its macros. #ifndef MINIZ_NO_ZLIB_APIS // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? 
(a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
// Little-endian load helpers: direct (possibly unaligned) loads when the
// target explicitly allows them, otherwise portable byte-by-byte assembly.
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
// Updates an Adler-32 checksum with buf_len bytes from ptr.
// Passing a NULL ptr returns the seed value MZ_ADLER32_INIT.
// The buffer is processed in blocks of at most 5552 bytes (NMAX from the
// zlib/RFC 1950 reference implementation), the largest count for which the
// deferred "% 65521" reductions cannot overflow the 32-bit s1/s2 accumulators.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    // Unrolled inner loop: 8 bytes per iteration.
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32.
// See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Updates a CRC-32 with buf_len bytes from ptr using a 16-entry nibble table
// (each byte is processed as two 4-bit halves). NULL ptr returns the seed
// value MZ_CRC32_INIT.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac,
      0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
      0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr) return MZ_CRC32_INIT;
  crcu32 = ~crcu32;
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
  }
  return ~crcu32;
}
// Frees a block allocated by miniz's heap helpers (thin MZ_FREE wrapper).
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc callback installed when the caller leaves zalloc NULL.
// NOTE(review): items * size can overflow size_t for huge requests; the
// internal callers here pass small fixed sizes, but worth confirming for any
// external use of this allocator convention.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
// Default zfree callback installed when the caller leaves zfree NULL.
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
//  (void)opaque, (void)address, (void)items, (void)size;
//  return MZ_REALLOC(address, items * size);
//}
// Returns the miniz version string.
const char *mz_version(void) { return MZ_VERSION; }
// zlib-compatible deflateInit(): forwards to mz_deflateInit2() with the
// default method, window bits, mem_level 9 and default strategy.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}
// zlib-compatible deflateInit2(): validates parameters, installs default
// allocators if needed, allocates a tdefl_compressor and initializes it.
// Returns MZ_OK, MZ_STREAM_ERROR, MZ_PARAM_ERROR or MZ_MEM_ERROR.
// Note mem_level is accepted for API compatibility only (range-checked 1..9).
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream) return MZ_STREAM_ERROR;
  // window_bits may be MZ_DEFAULT_WINDOW_BITS (zlib stream) or its negation
  // (raw deflate); anything else is rejected.
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved
= 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    // Initialization failed: release the state we just allocated.
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// zlib-compatible deflateReset(): zeroes the byte counters and re-inits the
// compressor with the flags it was originally created with.
int mz_deflateReset(mz_streamp pStream) {
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
             ((tdefl_compressor *)pStream->state)->m_flags);
  return MZ_OK;
}
// zlib-compatible deflate(): consumes input from next_in/avail_in and writes
// compressed output to next_out/avail_out, updating the totals and the
// running adler. flush follows zlib semantics (MZ_PARTIAL_FLUSH is treated
// as MZ_SYNC_FLUSH). Returns MZ_OK, MZ_STREAM_END on completion, or a
// MZ_*_ERROR code.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  // Once the compressor has reported DONE, only an MZ_FINISH call is valid
  // (and simply re-reports end-of-stream).
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ?
MZ_STREAM_END : MZ_BUF_ERROR;
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  // Pump tdefl_compress() until it finishes, errors out, or can make no
  // further progress with the buffers the caller supplied.
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// zlib-compatible deflateEnd(): frees the compressor state if present.
// Safe to call on a stream whose init failed (state may be NULL).
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// Worst-case compressed size for source_len input bytes.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  (void)pStream;
  // This is really over conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking works.)
  return MZ_MAX(128 + (source_len * 110) / 100,
                128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
// One-shot in-memory compression (zlib compress2() equivalent).
// On entry *pDest_len is the capacity of pDest; on success it is updated to
// the number of compressed bytes written.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int status;
  mz_stream stream;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
  // Reject buffers larger than 4 GB: avail_in/avail_out are 32-bit.
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK) return status;
  status = mz_deflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_deflateEnd(&stream);
    // MZ_OK here means the output buffer was too small for a single-shot
    // MZ_FINISH, which callers see as MZ_BUF_ERROR.
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }
  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}
// One-shot compression at the default level (zlib compress() equivalent).
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// zlib-compatible compressBound().
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// Per-stream inflate state: the tinfl coroutine, a sliding output dictionary
// (output is staged here, then copied to the caller's buffer), and flags
// tracking first-call / flushed status.
typedef struct {
  tinfl_decompressor m_decomp;
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;
// zlib-compatible inflateInit2(): window_bits must be MZ_DEFAULT_WINDOW_BITS
// (zlib-wrapped stream) or its negation (raw deflate). Installs default
// allocators if needed and allocates/initializes the inflate_state.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pDecomp;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                             sizeof(inflate_state));
  if (!pDecomp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pDecomp;
  tinfl_init(&pDecomp->m_decomp);
  pDecomp->m_dict_ofs = 0;
  pDecomp->m_dict_avail = 0;
  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pDecomp->m_first_call = 1;
  pDecomp->m_has_flushed = 0;
  pDecomp->m_window_bits = window_bits;
  return MZ_OK;
}
// zlib-compatible inflateInit(): expects a zlib-wrapped stream.
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int
mz_inflate(mz_streamp pStream, int flush) {
  // zlib-compatible inflate(). Two paths:
  //  1) MZ_FINISH on the very first call: decompress directly into the
  //     caller's buffer, which must hold the entire output.
  //  2) Otherwise: decompress into the internal dictionary and copy out as
  //     much as fits in avail_out, resuming on subsequent calls.
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      // Anything but DONE means the single-shot buffers were too small;
      // mark the stream failed so later calls report MZ_DATA_ERROR.
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // When flush != MZ_FINISH we must assume there's more input coming.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // Drain any output left over in the dictionary from a previous call first.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// zlib-compatible inflateEnd(): frees the inflate state if present.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// One-shot in-memory decompression (zlib uncompress() equivalent).
// On entry *pDest_len is the capacity of pDest; on success it is updated to
// the number of decompressed bytes written.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;
  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    // BUF_ERROR with all input consumed means the stream was truncated.
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ?
MZ_DATA_ERROR
               : status;
  }
  *pDest_len = stream.total_out;
  return mz_inflateEnd(&stream);
}
// Maps an MZ_* status code to a short human-readable description.
// Returns "" for MZ_OK and NULL for unrecognized codes.
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  mz_uint i;
  for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
    if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
  return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
// Coroutine machinery: tinfl_decompress() is written as a single resumable
// function. TINFL_CR_RETURN saves a resume point in r->m_state and jumps to
// common_exit; TINFL_CR_BEGIN's switch re-enters at the saved case label on
// the next call (Duff's-device style).
#define TINFL_CR_BEGIN \
  switch (r->m_state) { \
    case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do { \
    status = result; \
    r->m_state = state_index; \
    goto common_exit; \
    case state_index:; \
  } \
  MZ_MACRO_END
// Permanently parks the coroutine on a terminal status (e.g. a hard error):
// every re-entry yields the same result again.
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do { \
    for (;;) { \
      TINFL_CR_RETURN(state_index, result); \
    } \
  } \
  MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. 
// Decodes one Huffman symbol into sym. Fast path: with >= 2 input bytes left,
// refill the bit buffer unconditionally and decode via the direct lookup table
// (falling back to the binary m_tree for codes longer than
// TINFL_FAST_LOOKUP_BITS). Slow path: TINFL_HUFF_BITBUF_FILL, which never
// reads more bytes than strictly necessary.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                             \
  do {                                                                         \
    int temp;                                                                  \
    mz_uint code_len, c;                                                       \
    if (num_bits < 15) {                                                       \
      if ((pIn_buf_end - pIn_buf_cur) < 2) {                                   \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                            \
      } else {                                                                 \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) |           \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8));      \
        pIn_buf_cur += 2;                                                      \
        num_bits += 16;                                                        \
      }                                                                        \
    }                                                                          \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0)                                                                     \
      code_len = temp >> 9, temp &= 511;                                       \
    else {                                                                     \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while (temp < 0);                                                      \
    }                                                                          \
    sym = temp;                                                                \
    bit_buf >>= code_len;                                                      \
    num_bits -= code_len;                                                      \
  }                                                                            \
  MZ_MACRO_END

// Core DEFLATE decompressor, written as a resumable switch-based coroutine
// (see TINFL_CR_* macros). On entry *pIn_buf_size/*pOut_buf_size hold the
// available input/output sizes; on return they are updated to the number of
// bytes consumed/produced. Returns a tinfl_status: negative on failure,
// TINFL_STATUS_NEEDS_MORE_INPUT / TINFL_STATUS_HAS_MORE_OUTPUT to indicate the
// caller must resupply buffers and call again, TINFL_STATUS_DONE when the
// final block has been fully decoded. Unless
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is set, the output buffer is
// treated as a power-of-2-sized circular LZ dictionary.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // RFC 1951 base-value/extra-bit tables for length codes 257-285 and the
  // 30 distance codes, plus the code-length-code transmission order.
  static const int s_length_base[31] = {
      3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23, 27, 31,
      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0,  0};
  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
  static const int s_dist_base[32] = {
      1,    2,    3,    4,    5,    7,     9,     13,    17,  25,   33,
      49,   65,   97,   129,  193,  257,   385,   513,   769, 1025, 1537,
      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0,   0};
  static const int s_dist_extra[32] = {0, 0, 0,  0,  1,  1,  2,  2,  3,  3,
                                       4, 4, 5,  5,  6,  6,  7,  7,  8,  8,
                                       9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
  static const mz_uint8 s_length_dezigzag[19] = {
      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
  // Minimum table sizes for the literal/length, distance and code-length
  // Huffman tables (the dynamic-block header stores sizes relative to these).
  static const int s_min_table_sizes[3] = {257, 1, 4};

  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next,
                 *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next,
           *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;

  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }

  // Restore the coroutine's locals from the persistent state, then jump to
  // wherever the previous call suspended.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN

  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 =
      0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Validate the 2-byte zlib header: FCHECK checksum, no preset dictionary,
    // compression method 8 (deflate), and a window size the circular output
    // buffer can actually satisfy.
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }

  // One iteration per deflate block; m_final bit 0 marks the last block,
  // bits 1-2 give the block type (0=stored, 1=static, 2=dynamic, 3=invalid).
  do {
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Stored (uncompressed) block: align to a byte boundary, read the
      // LEN/NLEN header, then copy LEN raw bytes to the output.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // Drain any bytes still sitting in the bit buffer first...
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      // ...then bulk-copy the rest straight from the input buffer.
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          } else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    } else if (r->m_type == 3) {
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    } else {
      if (r->m_type == 1) {
        // Static Huffman block: install the fixed code lengths from RFC 1951
        // section 3.2.6 (literals 8/9/7/8 bits, all distances 5 bits).
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i) *p++ = 8;
        for (; i <= 255; ++i) *p++ = 9;
        for (; i <= 279; ++i) *p++ = 7;
        for (; i <= 287; ++i) *p++ = 8;
      } else {
        // Dynamic block: read HLIT/HDIST/HCLEN then the code-length-code
        // lengths in the dezigzag transmission order.
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build decoding tables: table 2 (code-length) first for dynamic
      // blocks, then tables 1 (distance) and 0 (literal/length). Each table
      // gets a TINFL_FAST_LOOKUP_BITS direct lookup plus a binary tree for
      // longer codes (negative entries are ~index links into m_tree).
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        // Reject over/under-subscribed code sets (Kraft sum must be exact),
        // except for the degenerate single-symbol case.
        if ((65536 != total) && (used_syms > 1)) {
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size) continue;
          cur_code = next_code[code_size]++;
          // Huffman codes are transmitted MSB-first; reverse the bits so the
          // LSB-first bit buffer can index the lookup table directly.
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          if (0 == (tree_cur = pTable->m_look_up[rev_code &
                                                 (TINFL_FAST_LOOKUP_SIZE -
                                                  1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            } else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Decode the run-length-encoded literal/length + distance code
          // lengths using the just-built code-length table (symbols 16/17/18
          // are repeat/zero-run codes).
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Main symbol decode loop. The outer loop handles one literal run or
      // one LZ match per iteration; the inner loop decodes literals, taking a
      // fast path (two literals per iteration, no coroutine suspension
      // possible) whenever enough input and output space remain.
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Slow path near buffer boundaries: fully checked decode.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256) break;
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          } else {
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 = r->m_tables[0]
                            .m_look_up[bit_buf &
                                       (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            // Bit 8 set means a length code (or 256 = end of block).
            if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            // Speculatively decode a second symbol before storing the first.
            if ((sym2 = r->m_tables[0]
                            .m_look_up[bit_buf &
                                       (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;
            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        if ((counter &= 511) == 256) break;

        // LZ match: expand the length code, then decode the distance code.
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }

        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }

        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }

        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);

        // If the copy could run past the end of the output buffer, fall back
        // to a byte-at-a-time copy that can suspend mid-match.
        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        // Non-overlapping match (counter <= dist): copy 8 bytes at a time.
        else if ((counter >= 9) && (counter <= dist)) {
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1) pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // General (possibly overlapping) match copy, 3 bytes per iteration.
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));

  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Read the big-endian adler-32 trailer that follows a zlib stream.
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH

common_exit:
  // Persist the coroutine's locals and report the buffer amounts consumed and
  // produced on this call.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    // Incrementally fold this call's output into the running adler-32
    // (unrolled, with modulo deferred for up to 5552 bytes per zlib's bound).
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.

// Decompresses an entire in-memory stream into a heap buffer that grows by
// doubling (starting at 128 bytes). Returns the malloc'd buffer (caller frees
// with MZ_FREE) and stores its size in *pOut_len, or returns NULL on failure.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    // NEEDS_MORE_INPUT is a failure here: the whole input was supplied.
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE) break;
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}

// Single-call decompression into a caller-provided buffer. Returns the number
// of bytes written, or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on any error
// (including insufficient output space).
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  tinfl_init(&decomp);
  status =
      tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                       (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
                       (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
                                       : out_buf_len;
}

// Decompresses using a fixed-size internal circular dictionary, delivering
// output to pPut_buf_func in chunks. Returns 1 on success, 0 if the stream
// failed or the callback aborted; *pIn_buf_size is updated to bytes consumed.
// (Note: returns TINFL_STATUS_FAILED, a negative value, if the dictionary
// allocation fails — callers should treat any non-1 result as failure.)
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
                         &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
                         (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                                    TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    if ((dst_buf_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    // Advance the write position within the power-of-2 circular dictionary.
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}

// ------------------- Low-level Compression (independent from all decompression
// API's)

// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. 
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. 
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

// Adjusts the code-length histogram pNum_codes[] (count of codes per length)
// so that no code exceeds max_code_size while the lengths still describe a
// complete prefix code (Kraft sum preserved).
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  // Fold every over-long code down to max_code_size.
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  // Kraft sum scaled by 2^max_code_size; a complete code sums to exactly
  // 1 << max_code_size.
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  // While oversubscribed: shorten one max-length code and lengthen a
  // shorter one; each iteration reduces the scaled sum by exactly 1.
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

// Builds canonical Huffman code sizes and (bit-reversed) codes for one of
// the compressor's tables. For dynamic tables the lengths are derived from
// the symbol frequency counts; for static tables the caller has already
// filled in m_huff_code_sizes and only the codes are generated.
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Static table: just tally how many codes exist at each length.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Gather the symbols actually used, then sort them by frequency.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    // Histogram the resulting lengths and clamp them to code_size_limit.
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Assign lengths back to symbols: longest codes go to the least
    // frequent symbols (pSyms is sorted ascending by frequency).
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Canonical code assignment per RFC 1951, section 3.2.2.
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    // Bit-reverse the code: DEFLATE emits Huffman codes MSB-first while the
    // bit buffer fills LSB-first.
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}

// Appends 'l' bits of 'b' to the output bit buffer, flushing whole bytes to
// the output buffer as they become available.
#define TDEFL_PUT_BITS(b, l)                               \
  do {                                                     \
    mz_uint bits = b;                                      \
    mz_uint len = l;                                       \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                 \
    d->m_bit_buffer |= (bits << d->m_bits_in);             \
    d->m_bits_in += len;                                   \
    while (d->m_bits_in >= 8) {                            \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)         \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
      d->m_bit_buffer >>= 8;                               \
      d->m_bits_in -= 8;                                   \
    }                                                      \
  }                                                        \
  MZ_MACRO_END

// Flushes a pending run of repeated non-zero code sizes, emitting either the
// literal sizes (run < 3) or code-length symbol 16 ("repeat previous").
#define TDEFL_RLE_PREV_CODE_SIZE()                                        \
  {                                                                       \
    if (rle_repeat_count) {                                               \
      if (rle_repeat_count < 3) {                                         \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                 \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);       \
        while (rle_repeat_count--)                                        \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;    \
      } else {                                                            \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
        packed_code_sizes[num_packed_code_sizes++] = 16;                  \
        packed_code_sizes[num_packed_code_sizes++] =                      \
            (mz_uint8)(rle_repeat_count - 3);                             \
      }                                                                   \
      rle_repeat_count = 0;                                               \
    }                                                                     \
  }

// Flushes a pending run of zero code sizes, emitting literal zeros (run < 3)
// or code-length symbol 17 (3..10 zeros) / 18 (11..138 zeros).
#define TDEFL_RLE_ZERO_CODE_SIZE()                                            \
  {                                                                           \
    if (rle_z_count) {                                                        \
      if (rle_z_count < 3) {                                                  \
        d->m_huff_count[2][0] =                                               \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);                 \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                         \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 17;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 3);                                      \
      } else {                                                                \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 18;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 11);                                     \
      }                                                                       \
      rle_z_count = 0;                                                        \
    }                                                                         \
  }

// Order in which code-length code sizes are transmitted (RFC 1951, 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16,
17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

// Emits the header of a dynamic-Huffman DEFLATE block: optimizes the
// literal/length and distance tables from the gathered frequency counts,
// RLE-packs both sets of code lengths, Huffman-codes that packed stream
// with a third table, and writes everything per RFC 1951, section 3.2.7.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) must always have a code.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused codes (minimums: 257 lit/len codes, 1 dist code).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  // Concatenate both length arrays and RLE-encode them together.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      // 138 is the maximum zero-run symbol 18 can express.
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        // 6 is the maximum repeat-run symbol 16 can express.
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  // Huffman-code the packed code sizes (max length 7 per the spec).
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  // BTYPE = 10 (dynamic Huffman).
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  // Count how many code-length code sizes must be transmitted (in swizzled
  // order), with a minimum of 4.
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the packed code sizes; symbols 16..18 carry extra repeat bits
  // (2, 3, and 7 bits respectively).
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}

// Emits the header of a static-Huffman DEFLATE block, installing the fixed
// code lengths from RFC 1951, section 3.2.6.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  for (i = 0; i <= 143; ++i) *p++ = 8;
  for (; i <= 255; ++i) *p++ = 9;
  for (; i <= 279; ++i) *p++ = 7;
  for (; i <= 287; ++i) *p++ = 8;
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  // BTYPE = 01 (static Huffman).
  TDEFL_PUT_BITS(1, 2);
}

// mz_bitmasks[n] has the low n bits set; used to isolate extra bits.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
// Fast path: encodes the buffered LZ codes using a 64-bit bit buffer and
// unaligned stores (little-endian only). Returns MZ_FALSE on output-buffer
// exhaustion.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;

// Unchecked bit append into the local 64-bit buffer.
#define TDEFL_PUT_BITS_FAST(b, l)                \
  {                                              \
    bit_buffer |= (((mz_uint64)(b)) << bits_in); \
    bits_in += (l);                              \
  }

  // 'flags' delivers one control bit per LZ code: 1 = match, 0 = literal;
  // the sentinel 0x100 marks when a fresh flag byte must be fetched.
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1 length byte + 2 distance bytes in the LZ code buffer.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal — and opportunistically emit up to two more consecutive
      // literals while the next flag bits are also 0.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    // Flush whole bytes of the 64-bit buffer with one unaligned store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  // Drain the remaining (<8) bits through the checked slow-path macro.
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable path: encodes the buffered LZ codes one code at a time using the
// checked TDEFL_PUT_BITS macro. Returns MZ_FALSE on output exhaustion.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1 length byte + 2 distance bytes (read byte-wise here).
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  // End-of-block symbol (256).
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
        // MINIZ_HAS_64BIT_REGISTERS

// Writes one complete DEFLATE block (header + codes) of the chosen type.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

// Compresses the buffered LZ codes into one DEFLATE block, falling back to
// a stored (raw) block when compression would expand the data. Returns the
// number of output bytes still awaiting flush, or a negative status.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
                      (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <=
                          d->m_dict_size;
  // Compress straight into the caller's buffer when it is large enough;
  // otherwise stage through the internal output buffer.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Right-align the partially filled flag byte; drop it entirely if empty.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  // zlib stream header (0x78 0x01) before the very first block.
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  // Snapshot the output state so the block can be rolled back.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded = tdefl_compress_block(
        d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
               (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // BTYPE = 00 (stored), then align to a byte boundary.
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    // LEN then NLEN (one's complement of LEN).
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    // Copy the uncompressed bytes straight from the dictionary.
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // Retry with static codes, which have a bounded header size.
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Final block: byte-align, then append the big-endian Adler-32
      // trailer when writing a zlib stream.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Full/sync flush: emit an empty stored block to byte-align the
      // stream (3-bit header, alignment, LEN=0, NLEN=0xFFFF).
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Hand the produced bytes to the sink: callback, caller buffer (possibly
  // partially, leaving a flush-remainder), or direct-output accounting.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Unaligned-load variant of the hash-chain match finder: compares 16 bits at
// a time. Updates *pMatch_dist/*pMatch_len only when a longer match than the
// incoming *pMatch_len is found.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint
        max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
               match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
               probe_len;
  // Fewer probes once matches are already long (diminishing returns).
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01 = the two bytes a candidate must match at the current best length;
  // s01 = the first two bytes of the lookahead.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
// Follow one hash-chain link; bail out of the whole search when the chain
// ends or the distance exceeds max_dist; break to verify on a tail match.
#define TDEFL_PROBE                                                            \
  next_probe_pos = d->m_next[probe_pos];                                       \
  if ((!next_probe_pos) ||                                                     \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))       \
    return;                                                                    \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                        \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    p = s;
    // Compare up to 32 word pairs (64 bytes > TDEFL_MAX_MATCH_LEN).
    probe_len = 32;
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full comparison length: maximal match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      // New best match; refresh the tail-word filter for the new length.
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable byte-at-a-time variant of the hash-chain match finder. Same
// contract as the unaligned version above.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
               match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
               probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // Candidate filter bytes at the end of the current best match.
  mz_uint8 c0 = d->m_dict[pos + match_len],
           c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
// Follow one hash-chain link (byte-compare filter variant).
#define TDEFL_PROBE                                                      \
  next_probe_pos = d->m_next[probe_pos];                                 \
  if ((!next_probe_pos) ||                                               \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return;                                                              \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                  \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                        \
      (d->m_dict[probe_pos + match_len - 1] == c1))                      \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  // Working copies of the compressor state, kept in locals for speed and
  // synced back to 'd' around every tdefl_flush_block() call.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    // Top up the lookahead from the source, copying into the circular
    // dictionary (and mirroring the wrap region so matches can run past the
    // end without masking).
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    // Greedy parse: hash the leading trigram, probe a single level-1 hash
    // slot, and take whatever match (if any) it yields.
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        // Extend the match 16 bits at a time, up to 32 word pairs.
        mz_uint32 probe_len = 32;
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          // Match too short (or a barely-minimal match at a large distance,
          // which would cost more than a literal): emit one literal.
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          // Record the match: length byte + 16-bit (dist - 1).
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0]
                         [s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        // No usable probe: emit one literal.
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      // LZ code buffer nearly full: sync state and flush a block.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    // Fewer than 4 bytes left in the lookahead: emit them as literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Persist the local working state back into the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

// Appends one literal byte to the LZ code buffer and updates its flag bit
// and frequency count.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  d->m_huff_count[0][lit]++;
}

// Appends one match (length byte + 16-bit dist-1) to the LZ code buffer,
// sets its flag bit, and updates the length/distance frequency counts.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
                                          s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}

// Main (non-fast-path) compression loop: maintains the sliding dictionary
// and hash chains, finds matches, and drives a lazy/greedy parse, flushing
// blocks as the LZ code buffer fills. Returns MZ_FALSE on a hard failure
// from tdefl_flush_block.
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      // Rolling 2-byte seed for the 3-byte hash updated in the loop below.
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the dictionary head so matches can run past the wrap.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      // Cold start: not enough data yet for the rolling hash; insert
      // byte-by-byte, hashing once 3 bytes are available.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE-only mode: only look for distance-1 runs of the previous byte.
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Reject unprofitable matches: minimal-length at large distance, a
    // self-referential distance, or short matches in filter mode.
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy parse: compare against the match deferred last iteration.
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          // Long enough to take immediately.
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          // Defer again in case the next position is even better.
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Start a lazy deferral.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}

// Copies as much pending (already-compressed) output as fits into the
// caller's buffer and reports consumed/produced byte counts. Returns DONE
// only when finished and nothing remains to flush.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
                      d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  return (d->m_finished && !d->m_output_flush_remaining) ?
                                                           TDEFL_STATUS_DONE
                                                         : TDEFL_STATUS_OKAY;
}

// Streaming compression entry point. Consumes up to *pIn_buf_size bytes from
// pIn_buf and writes up to *pOut_buf_size bytes to pOut_buf (both updated on
// return with the amounts actually used), honoring the requested flush mode.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Parameter sanity: exactly one of callback-output or buffer-output must
  // be in use, the previous call must have succeeded, and non-NULL sizes
  // require non-NULL buffers.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // Drain any output left over from a previous call before compressing more.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // The fast path only supports level-1 greedy parsing with no filtering,
  // raw-block, or RLE options.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  // Keep the Adler-32 of the consumed input up to date when needed.
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All input consumed and a flush requested: emit the final/flush block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // Full flush also resets the dictionary/hash state.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

// Convenience wrapper for callback-driven output: compresses a whole buffer,
// delivering output exclusively through the registered put-buf callback.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

// Initializes (or reinitializes) a compressor. flags packs the probe count
// (low 12 bits) plus the TDEFL_* option bits; pPut_buf_func may be NULL for
// buffer-to-buffer operation.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Clearing the hash is skipped in nondeterministic mode to save time.
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // First byte of the LZ code buffer is reserved for the first flag byte.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

// Returns the status code of the most recent tdefl_compress() call.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
} mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; } mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { tdefl_compressor *pComp; mz_bool succeeded; if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE; pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); if (!pComp) return MZ_FALSE; succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY); succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE); MZ_FREE(pComp); return succeeded; } typedef struct { size_t m_size, m_capacity; mz_uint8 *m_pBuf; mz_bool m_expandable; } tdefl_output_buffer; static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser) { tdefl_output_buffer *p = (tdefl_output_buffer *)pUser; size_t new_size = p->m_size + len; if (new_size > p->m_capacity) { size_t new_capacity = p->m_capacity; mz_uint8 *pNew_buf; if (!p->m_expandable) return MZ_FALSE; do { new_capacity = MZ_MAX(128U, new_capacity << 1U); } while (new_size > new_capacity); pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity); if (!pNew_buf) return MZ_FALSE; p->m_pBuf = pNew_buf; p->m_capacity = new_capacity; } memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len); p->m_size = new_size; return MZ_TRUE; } void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_len) return MZ_FALSE; else *pOut_len = 0; out_buf.m_expandable = MZ_TRUE; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return NULL; *pOut_len = out_buf.m_size; return out_buf.m_pBuf; } size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags) { tdefl_output_buffer out_buf; MZ_CLEAR_OBJ(out_buf); if (!pOut_buf) 
return 0; out_buf.m_pBuf = (mz_uint8 *)pOut_buf; out_buf.m_capacity = out_buf_len; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0; return out_buf.m_size; } #ifndef MINIZ_NO_ZLIB_APIS static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; // level may actually range from [0,10] (10 is a "hidden" max level, where we // want a bit more compression and it's fine if throughput to fall off a cliff // on some files). mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning( \ disable : 4267) // 'argument': conversion from '__int64' to 'int', // possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. 
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) 
#define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, 
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool 
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, 
this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, 
mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], 
pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p; for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) { mz_uint total_header_size, comp_size, decomp_size, disk_index; if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)) return MZ_FALSE; MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p); if (sort_central_dir) MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) || (comp_size == 0xFFFFFFFF)) return MZ_FALSE; disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS); if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE; if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size) return MZ_FALSE; if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n) return MZ_FALSE; n -= total_header_size; p += total_header_size; } } if (sort_central_dir) mz_zip_reader_sort_central_dir_offsets_by_filename(pZip); return MZ_TRUE; } mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags) { if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE; if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; 
size_t s = (file_ofs >= pZip->m_archive_size) ? 0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? 
pZip->m_total_files : 0; } static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh( mz_zip_archive *pZip, mz_uint file_index) { if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return NULL; return &MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index)); } mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index) { mz_uint m_bit_flag; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); return (m_bit_flag & 1); } mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index) { mz_uint filename_len, external_attr; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if (!p) return MZ_FALSE; // First see if the filename ends with a '/' character. filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS); if (filename_len) { if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/') return MZ_TRUE; } // Bugfix: This code was also checking if the internal attribute was non-zero, // which wasn't correct. // Most/all zip writers (hopefully) set DOS file/directory attributes in the // low 16-bits, so check for the DOS directory flag and ignore the source OS // ID in the created by field. // FIXME: Remove this check? Is it necessary - we already check the filename. external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); if ((external_attr & 0x10) != 0) return MZ_TRUE; return MZ_FALSE; } mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat) { mz_uint n; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); if ((!p) || (!pStat)) return MZ_FALSE; // Unpack the central directory record. 
pStat->m_file_index = file_index; pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT( &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index); pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS); pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS); pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS); pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS); #ifndef MINIZ_NO_TIME pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS)); #endif pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS); pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS); pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS); pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS); // Copy as much of the filename and comment as possible. 
// (continuation of mz_zip_reader_file_stat)
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';

n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
// The comment trails the filename and extra field inside the record.
memcpy(pStat->m_comment,
       p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
           MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
           MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
       n);
pStat->m_comment[n] = '\0';

return MZ_TRUE;
}

// Copies the filename of entry file_index into pFilename (NUL-terminated
// whenever filename_buf_size > 0, truncating if necessary). Returns the
// number of bytes needed to hold the complete name plus terminator, or 0 on
// failure.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size)
      pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

// Length-bounded string equality honoring MZ_ZIP_FLAG_CASE_SENSITIVE; the
// case-insensitive path folds both sides through MZ_TOLOWER.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
      return MZ_FALSE;
  return MZ_TRUE;
}

// Case-insensitive three-way comparison between the filename stored in
// central-dir entry l_index and the caller-supplied name pR of length r_len.
// Returns <0, 0, or >0 like strcmp.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // If the shared prefix matched completely, the shorter name sorts first.
  return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}

// Binary search over the presorted central-dir index. Only valid for plain
// case-insensitive full-path lookups (callers check the flags first).
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir,
                                              pCentral_dir_offsets, file_index,
                                              pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

// Locates a file by name (and optional comment) and returns its index, or -1
// if not found / on invalid arguments.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: binary-search the sorted index when the query is a plain,
  // case-insensitive, full-path lookup with no comment filter.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) ==
       0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF)
    return -1;
  comment_len = pComment ?
                    strlen(pComment) : 0;
  if (comment_len > 0xFFFF)
    return -1;
  // Slow path: linear scan of every central directory record.
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len)
      continue;
    if (comment_len) {
      // Comment filter: the entry's comment must match exactly.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    // Optionally strip directory components before matching the base name.
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

// Extracts entry file_index into pBuf (buf_size bytes) without allocating a
// heap inflator; an optional caller-supplied read buffer is used for
// file-backed archives. On success the output size and CRC32 are verified
// unless the raw compressed data was requested.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                                       read_buf_size, read_buf_ofs = 0,
                                       read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  // Local header scratch, mz_uint32-aligned for the LE read macros.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;

  if ((buf_size) && (!pBuf))
    return MZ_FALSE;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd
  // zips with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
                    ? file_stat.m_comp_size
                    : file_stat.m_uncomp_size;
  if (buf_size < needed_size)
    return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // Skip past the local header's variable-length filename/extra data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }

  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);

  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size)
      return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  // Inflate straight into the caller's buffer, refilling the read buffer as
  // the file is consumed.
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);

  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }

  // Only free the read buffer if this function allocated it.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

  return status == TINFL_STATUS_DONE;
}

// Name-based wrapper around mz_zip_reader_extract_to_mem_no_alloc().
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, pUser_read_buf,
                                               user_read_buf_size);
}

// Extracts by index, letting the reader allocate its own read buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}

// Extracts by name, letting the reader allocate its own read buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}

// Extracts entry file_index into a buffer allocated with the archive's
// allocator; on success *pSize receives the extracted byte count and the
// caller owns the returned block (free with pZip->m_pFree).
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;

  if (pSize)
    *pSize = 0;
  if (!p)
    return NULL;

  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ?
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? 
  // Directory entry: nothing to stream out.
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // Skip past the local header's variable-length filename/extra data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
      // Memory-backed archive: a single callback covers the whole payload.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // File-backed archive: stream through the temporary read buffer.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }

        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(file_crc32,
                                           (const mz_uint8 *)pRead_buf,
                                           (size_t)read_buf_avail);

        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // Deflated entry: inflate through a dictionary-sized circular window,
    // flushing each produced chunk to the callback.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);

    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        // Current write position inside the circular LZ window.
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }

        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs,
            &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur,
            &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;

        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against a corrupt stream producing more than advertised.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }

  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }

  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);

  return status == TINFL_STATUS_DONE;
}

// Name-based wrapper around mz_zip_reader_extract_to_callback().
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback,
                                           pOpaque, flags);
}

#ifndef MINIZ_NO_STDIO
// mz_file_write_func adapter that appends to a stdio FILE (the offset is
// ignored - extraction writes strictly sequentially).
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

// Extracts entry file_index to the file pDst_filename; on success the file's
// timestamps are set from the archive entry (when time support is compiled
// in).
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename, "wb");
  if (!pFile)
    return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  if (MZ_FCLOSE(pFile) == EOF)
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO

// Releases all reader state (arrays, stdio handle) and returns the archive
// object to the invalid (unopened) mode.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;

  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO

    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Name-based wrapper around mz_zip_reader_extract_to_file().
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename,
                                       flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Little-endian scalar writers used to build ZIP records in place.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

// Initializes pZip for writing through its m_pWrite callback; existing_size
// is the number of bytes already present before the archive data starts.
// Default allocator callbacks are installed where the caller left them NULL.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }

  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// m_pWrite implementation backing growable heap archives; doubles the block
// capacity (starting at 64 bytes) until the write fits.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}

// Initializes a heap-backed writer, optionally reserving leading space and
// preallocating the memory block up front.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pWrite implementation that writes to the writer's stdio FILE, seeking
// only when the requested offset differs from the current position.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Creates pFilename and initializes a file-backed writer, zero-filling any
// requested reserved region at the start of the file.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Converts an archive that was opened for reading into one that can be
// appended to in place.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. 
  // New data overwrites the old central directory; it will be rebuilt when
  // the writer is finalized.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;

  return MZ_TRUE;
}

// Adds an in-memory buffer to the archive with no comment and default
// uncompressed size/CRC handling.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL,
                                  0, level_and_flags, 0, 0);
}

// Bookkeeping handed to the tdefl output callback while compressing a file
// straight into the archive.
typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs;
  mz_uint64 m_comp_size;
} mz_zip_writer_add_state;

// tdefl put-buf callback: appends compressed bytes at the current archive
// offset and accumulates the total compressed size.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

// Serializes a ZIP local directory header into pDst
// (MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes, little-endian).
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // "Version needed" is 2.0 only when the entry is deflated.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}

// Serializes a ZIP central directory header into pDst
// (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes, little-endian).
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // "Version needed" is 2.0 only when the entry is deflated.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}

// Builds a central directory record and appends it (header + filename +
// extra + comment) plus its offset to the in-memory central dir arrays,
// rolling back on failure.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];

  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;

  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir,
                               central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }

  return MZ_TRUE;
}

// Rejects archive names that are absolute, contain drive letters, or use
// DOS-style backslashes.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  // Basic ZIP archive filename validity checks: Valid filenames cannot start
  // with a forward slash, cannot contain a drive letter, and cannot use
  // DOS-style backward slashes.
  if (*pArchive_name == '/')
    return MZ_FALSE;
  while (*pArchive_name) {
    if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
      return MZ_FALSE;
    pArchive_name++;
  }
  return MZ_TRUE;
}

// Number of zero bytes needed so the next local header lands on the
// user-requested power-of-two alignment boundary.
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 n;
  if (!pZip->m_file_offset_alignment)
    return 0;
  n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - n) &
         (pZip->m_file_offset_alignment - 1);
}

// Writes n zero bytes at cur_file_ofs using a small stack buffer.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs,
                                         mz_uint32 n) {
  char buf[4096];
  memset(buf, 0, MZ_MIN(sizeof(buf), n));
  while (n) {
    mz_uint32 s = MZ_MIN(sizeof(buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
      return MZ_FALSE;
    cur_file_ofs += s;
    n -= s;
  }
  return MZ_TRUE;
}

// Adds a memory buffer to the archive with full control over the comment,
// compression level/flags, and - when adding already-compressed data - the
// original uncompressed size and CRC32.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags,
                                 mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64
      local_dir_header_ofs = pZip->m_archive_size,
      cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // Level 0 or pre-compressed input is written verbatim.
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;

  pState = pZip->m_pState;

  // uncomp_size is only meaningful for pre-compressed data.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;

#ifndef MINIZ_NO_TIME
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }

  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size +
              comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;

  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }

  // Reserve room for alignment padding plus the local header, which is
  // rewritten later once the compressed size and CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);

  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;

  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      // Tiny payloads never benefit from deflate; store them.
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }

  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;

    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
      method = MZ_DEFLATED;
  } else if (buf_size) {
    mz_zip_writer_add_state state;

    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;

    // -15 window bits: raw deflate, no zlib header, as ZIP requires.
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }

    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }

  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;

  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;

  // Now that the compressed size and CRC are known, write the real local
  // header over the zeros reserved earlier.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0,
          uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time,
          dos_date))
    return MZ_FALSE;

  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;

  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0,
          pComment, comment_size, uncomp_size, comp_size, uncomp_crc32,
          method, 0, dos_time, dos_date, local_dir_header_ofs,
          ext_attributes))
    return MZ_FALSE;

  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;

  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Adds a file from disk to the archive. NOTE(review): this definition
// continues beyond the visible chunk.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename,
                               const void *pComment, mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return
MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, 
cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? 
TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if 
(NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); 
return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = 
cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if 
(!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. 
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. 
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef _MSC_VER #pragma warning(pop) #endif } #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static const int kEXRVersionSize = 8; static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
// Bit-level view of a 32-bit IEEE-754 float: raw integer, float, and
// sign/exponent/mantissa fields. Field order follows MINIZ_LITTLE_ENDIAN.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// Bit-level view of a 16-bit half-precision float (1 sign, 5 exponent,
// 10 mantissa bits).
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// Convert a half-precision value to single precision by widening the
// exponent/mantissa bits and patching up Inf/NaN and denormal cases.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// Convert a single-precision value to half precision with round-to-nearest,
// handling overflow (-> Inf), underflow (-> denormal/zero), and NaN.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

// Copy a NUL-terminated string starting at `ptr` into `*s` and return the
// pointer just past the terminator.
// NOTE(review): no length limit — assumes a '\0' exists before the end of
// the underlying buffer; verify at call sites.
static const char *ReadString(std::string *s, const char *ptr) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((*q) != 0) q++;

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parse one EXR attribute from `marker` (layout: name '\0' type '\0'
// uint32 data_len, then data_len bytes of payload, little-endian on disk).
// On success fills name/type/data, sets *marker_size to the number of bytes
// consumed, and returns true. Returns false when the buffer of `size` bytes
// is too short or a string is unterminated.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  // swap4 fixes byte order on big-endian hosts; no-op on little-endian.
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Append one attribute to `out` in EXR on-disk layout: name '\0' type '\0'
// little-endian uint32 length, then `len` bytes of `data`.
// NOTE(review): `len` is a signed int and is not validated — callers must
// pass len >= 0.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  // Convert the length to little-endian byte order before serializing.
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// One entry of an EXR channel list ("chlist" attribute).
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;    // see on-disk pixel type codes (serialized as int)
  int x_sampling;    // horizontal subsampling factor
  int y_sampling;    // vertical subsampling factor
  unsigned char p_linear;  // "perceptually linear" hint byte
  unsigned char pad[3];    // struct padding only; not serialized
} ChannelInfo;

// Parsed EXR header state shared by the reader/writer paths.
typedef struct {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  // Reset every field to its zero/empty state so the struct can be reused.
  void clear() {
    channels.clear();
    attributes.clear();

    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;
  }
} HeaderInfo;

// Decode a serialized EXR "chlist" attribute payload into `channels`.
// Each record is: name '\0', int pixel_type, uchar p_linear, uchar[3]
// reserved, int x_sampling, int y_sampling; the list ends at a '\0' name.
// NOTE(review): no bounds checking against data.size() — a truncated or
// malicious channel list can read past the buffer; confirm callers validate
// the payload size first.
static void ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;
    p = ReadString(&info.name, p);

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));  // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;

    // Fix byte order of the int fields on big-endian hosts.
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));

    channels.push_back(info);
  }
}

// Serialize `channels` into `data` in EXR "chlist" layout (inverse of
// ReadChannelInfo); a final '\0' terminates the list.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  // +1 for the terminating '\0' of the whole list.
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    // Serialize the int fields in little-endian byte order.
    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    // p_linear byte followed by 3 reserved bytes (left zero by resize).
    (*p) = channels[c].p_linear;
    p += 4;

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}

// ZIP-compress `src_size` bytes from `src` into `dst`, storing the result
// size in `compressedSize`. Applies OpenEXR's pre-pass first (split bytes
// into two interleaved halves, then delta-encode) so deflate compresses
// better. Falls back to a raw copy when compression does not shrink the
// data. `dst` must be at least compressBound(src_size) bytes.
// NOTE(review): compression errors are only caught by assert() — in release
// builds a failure would go unreported.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data: even-index bytes to the first half of tmpBuf,
  // odd-index bytes to the second half.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor: replace each byte with its delta from the previous byte
  // (biased so the result stays in unsigned-char range).
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  // zlib path.
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret =
      compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
               src_size);
  assert(ret == Z_OK);

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressZip: inflate `src` into a scratch buffer, undo the
// delta predictor, then de-interleave the two halves back into `dst`.
// `*uncompressed_size` is in/out per the underlying uncompress() API.
// If the stored size equals the source size the data was stored raw
// (see CompressZip's fallback) and is copied through unchanged.
// NOTE(review): decompression errors are only caught by assert(); untrusted
// input that fails to inflate is not reported in release builds.
static void DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  assert(ret == Z_OK);
  (void)ret;
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor: running sum undoes the delta encoding done on compression.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data: interleave the two halves back together.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning( \
    disable : 4267)  // 'argument': conversion from '__int64' to 'int',
                     // possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

// Runs shorter than MIN_RUN_LENGTH are emitted literally; runs are capped
// at MAX_RUN_LENGTH + 1 bytes so the count fits in a signed byte.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
// static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressable run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressable run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the oncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength. 
// static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; if (0 > (maxLength -= count)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. 
// (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static void DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); assert(ret == static_cast<int>(uncompressed_size)); (void)ret; // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. 
// LLC)
// (3 clause BSD license)
//

// Per-channel bookkeeping for PIZ: where the channel's samples live in the
// scratch buffer, its dimensions, y-sampling, and size in 16-bit units.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

// Forward 14-bit wavelet step: l = floor((a+b)/2) (the low band),
// h = a - b (the high band), computed in signed 16-bit arithmetic.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);

  const short avg = (sa + sb) >> 1;
  const short diff = sa - sb;

  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff);
}

// Inverse of wenc14: reconstruct (a, b) from the low/high bands.
// The (d & 1) term restores the bit lost by the floored average.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const short sl = static_cast<short>(l);
  const short sh = static_cast<short>(h);

  const int d = sh;
  const int ai = sl + (d & 1) + (d >> 1);

  a = static_cast<unsigned short>(static_cast<short>(ai));
  b = static_cast<unsigned short>(static_cast<short>(ai - d));
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

// Parameters for the modulo-arithmetic (full 16-bit) wavelet basis.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Forward 16-bit wavelet step with wraparound (mod 2^16) arithmetic.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Inverse of wenc16.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding: in-place hierarchical transform of an nx-by-ny
// array with strides ox/oy; the 14-bit basis is used when all values
// fit in 14 bits (mx < 2^14), otherwise the modulo 16-bit basis.
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierachical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding: exact inverse of wav2Encode, run from the
// coarsest level down to level 0.
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

// One slot of the fast decoding table: a short code resolved directly
// (len/lit), or a list `p` of candidate symbols for long codes.
struct HufDec {  // short code long code
  //-------------------------------
  int len : 8;   // code length 0
  int lit : 24;  // lit p size
  int *p;        // 0 lits
};

// An encoding-table entry packs (code << 6) | length; these unpack it.
inline long long hufLength(long long code) { return code & 63; }

inline long long hufCode(long long code) { return code >> 6; }

// Append `nBits` of `bits` to the bit accumulator (c, lc), flushing whole
// bytes to `out` (MSB-first).
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c <<= nBits;
  lc += nBits;

  c |= bits;

  while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}

// Read `nBits` from the bit accumulator (c, lc), refilling from `in`.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
    lc += 8;
  }

  lc -= nBits;

  return (c >> lc) & ((1 << nBits) - 1);
}

//
// ENCODING TABLE BUILDING & (UN)PACKING
//

//
// Build a "canonical" Huffman code table:
//	- for each (uncompressed) symbol, hcode contains the length
//	  of the corresponding code (in the compressed data)
//	- canonical codes are computed and stored in hcode
//	- the rules for constructing canonical codes are as follows:
//	  * shorter codes (if filled with zeroes to the right)
//	    have a numerically higher value than longer codes
//	  * for codes with the same length, numerical values
//	    increase with numerical symbol values
//	- because the canonical code table can be constructed from
//	  symbol lengths alone, the code table can be transmitted
//	  without sending the actual code values
//	- see http://www.compressconsult.com/huffman/
//

static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //

  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //

  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i.  Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //

  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}

//
// Compute Huffman codes (based on frq input) and store them in frq:
//	- code structure is : [63:lsb - 6:msb] | [5-0: bit length];
//	- max code length is 58 bits;
//	- codes outside the range [im-iM] have a null length (unused values);
//	- original frequencies are destroyed;
//	- encoding tables are used by hufEncode() and hufBuildDecTable();
//

// Min-heap comparator over pointers into the frequency array.
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};

static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         //  o: min frq index
    int *iM)         //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //

  int hlink[HUF_ENCSIZE];
  long long *fHeap[HUF_ENCSIZE];

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //

  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //

  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  long long scode[HUF_ENCSIZE];
  memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //

    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //

    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //

        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //

    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //

  hufCanonicalCodeTable(scode);
  memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//	- only code lengths, not actual codes, are stored
//	- runs of zeroes are compressed as follows:
//
//	  unpacked		packed
//	  --------------------------------
//	  1 zero		0	(6 bits)
//	  2 zeroes		59
//	  3 zeroes		60
//	  4 zeroes		61
//	  5 zeroes		62
//	  n zeroes (6 or more)	63 n-6	(6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            //  o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Collapse a run of zero-length (unused) codes into a run token.
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
// returns false on malformed input (table overruns `ni` bytes or a
// zero-run extends past iM).
//

static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    //  o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    if (p - *pcode > ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
//      decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//	- short codes (<= HUF_DECBITS) are resolved with a single table access;
//	- long code entry allocations are not optimized, because long codes are
//	  unfrequent;
//	- decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
//      decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the candidate list by one (simple copy; long codes are rare).
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

// Emit one Huffman code (length taken from the packed table entry).
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // runCount is the number of repeats AFTER the first occurrence,
    // so runCount + 1 symbols are written here.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//

// Pull one input byte into the bit accumulator (c, lc).
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }

// Emit decoded symbol `po`; if it equals the run-length code `rlc`,
// read an 8-bit repeat count and replicate the previous output symbol.
// Bounds-checked against `oe`; expands to `return false` on overflow.
#define getCode(po, rlc, c, lc, in, out, oe) \
  {                                          \
    if (po == rlc) {                         \
      if (lc < 8) getChar(c, lc, in);        \
                                             \
      lc -= 8;                               \
                                             \
      unsigned char cs = (c >> lc);          \
                                             \
      if (out + cs > oe) return false;       \
                                             \
      unsigned short s = out[-1];            \
                                             \
      while (cs-- > 0) *out++ = s;           \
    } else if (out < oe) {                   \
      *out++ = po;                           \
    } else {                                 \
      return false;                          \
    }                                        \
  }

//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//

static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in bytes)
                      unsigned short *out)  //  o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;
  unsigned short *oe = out + no;
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //

  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //

    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //

        lc -= pl.len;
        getCode(pl.lit, rlc, c, lc, in, out, oe);
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code

        //
        // Search long code
        //

        int j;

        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //

              lc -= l;
              getCode(pl.p[j], rlc, c, lc, in, out, oe);
              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }

  //
  // Get remaining (short) codes: drop the padding bits of the final
  // partial byte, then drain the accumulator.
  //

  int i = (8 - ni) & 7;

  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      getCode(pl.lit, rlc, c, lc, in, out, oe);
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }

  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();

  return true;
}

// Histogram of 16-bit symbols; freq must have HUF_ENCSIZE slots.
static void countFrequencies(long long freq[HUF_ENCSIZE],
                             const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;

  for (int i = 0; i < n; ++i) ++freq[data[i]];
}

// Store a 32-bit value little-endian, independent of host byte order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}

// Load a 32-bit little-endian value, independent of host byte order.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;

  return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
         ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}

//
// EXTERNAL INTERFACE
//

// Huffman-compress nRaw 16-bit values into `compressed`.
// Layout: 20-byte header (im, iM, tableLength, nBits, reserved),
// then the packed code table, then the encoded bitstream.
// Returns total byte length written.
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  long long freq[HUF_ENCSIZE];

  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq, &im, &iM);

  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq, im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;

  char *dataStart = tableEnd;
  int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;

  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + data_length - compressed;
}

// Inverse of hufCompress. Returns false on malformed input.
// NOTE(review): when nCompressed == 0 both branches return false, so an
// empty stream with nRaw == 0 is reported as failure; OpenEXR's
// hufUncompress treats that case as success — confirm intended behavior.
static bool hufUncompress(const char compressed[], int nCompressed,
                          unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    if (nRaw != 0) return false;

    return false;
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //

  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                      &freq.at(0));

    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }

      hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
      hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw);
    }
    // catch (...)
    //{
    //  hufFreeDecTable (hdec);
    //  throw;
    //}

    hufFreeDecTable(&hdec.at(0));
  }

  return true;
}

//
// Functions to compress the range of values in the pixel data
//

const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

// Build a presence bitmap of all 16-bit values occurring in `data` and
// report the first/last non-zero bitmap byte (for compact storage).
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}

// Build the forward LUT that maps each occurring value to a dense index.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }

  return k - 1;  // maximum value stored in lut[],
}                // i.e.
// number of ones in bitmap minus 1

// Build the reverse LUT (dense index -> original value); inverse of
// forwardLutFromBitmap.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
  }

  int n = k - 1;

  while (k < USHORT_RANGE) lut[k++] = 0;

  return n;  // maximum k where lut[k] is non-zero,
}            // i.e. number of ones in bitmap minus 1

// Remap every sample through the LUT, in place.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif  // __clang__

#ifdef _MSC_VER
#pragma warning(pop)
#endif

// PIZ-compress a block of scanlines: range-compact values through a LUT,
// 2D-wavelet-encode each channel, then Huffman-encode the result.
// Output layout: minNonZero, maxNonZero, bitmap slice, 4-byte Huffman
// length, Huffman data. Falls back to a raw copy if that is smaller.
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));

  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  // Carve tmpBuffer into per-channel regions; non-HALF channels occupy
  // two 16-bit slots per pixel.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];

    cd.start = tmpBufferEnd;
    cd.end = cd.start;
    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    cd.size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }

  // De-interleave the scanline-ordered input into per-channel planes.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //  continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap,
                 minNonZero, maxNonZero);

  unsigned short lut[USHORT_RANGE];
  unsigned short maxValue = forwardLutFromBitmap(bitmap, lut);
  applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));

  //
  // Store range compression info in _outBuffer
  //

  char *buf = reinterpret_cast<char *>(outPtr);

  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);

  if (minNonZero <= maxNonZero) {
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }

  //
  // Apply wavelet encoding
  //

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Apply Huffman encoding; append the result to _outBuffer
  //

  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);

  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));

  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}

// Inverse of CompressPiz: Huffman-decode, wavelet-decode each channel,
// expand values through the reverse LUT, then re-interleave into scanline
// order. (Continues past this chunk.)
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }

  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  memset(bitmap, 0, BITMAP_SIZE);

  const unsigned char *ptr = inPtr;
  minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;

  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }

  if (minNonZero <= maxNonZero) {
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }

  unsigned short lut[USHORT_RANGE];
  memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap, lut);

  //
  // Huffman decoding
  //

  int length;

  length = *(reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);

  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer.at(0),
                static_cast<int>(tmpBufSize));

  //
  // Wavelet decoding
  //

  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));

  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Expand the pixel data to their original range
  //

  applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize));

  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd =
channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else { assert(0); } return false; } // Assume pixel format is FLOAT for all channels. 
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, int num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = dst_width * dst_num_lines * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((dst_width & 3U) || (dst_num_lines & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, dst_width, dst_num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = dst_width * dst_num_lines; for (int c = 0; c < num_channels; c++) { // decompress 4x4 pixel block. for (int y = 0; y < dst_num_lines; y += 4) { for (int x = 0; x < dst_width; x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * dst_width + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. 
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((width & 3U) || (num_lines & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. 
for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // static void DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + 
static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += 
(static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += 
(static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned short)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - 
y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + c * static_cast<size_t>(width) * sizeof(float)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } static void DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { assert(tile_offset_x * tile_size_x < data_width); assert(tile_offset_y * tile_size_y < data_height); // Compute actual image size in a tile. 
if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size. DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { assert(0); } } } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += 
sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = 
levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int ReadChannelInfo(info->channels, data); if (info->channels.size() < 1) { if (err) { (*err) = "# of channels is zero."; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } else if (attr_name.compare("displayWindow") == 0) { memcpy(&info->display_window[0], &data.at(0), 
sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } else if (attr_name.compare("lineOrder") == 0) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } else if (attr_name.compare("pixelAspectRatio") == 0) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } else if (attr_name.compare("screenWindowCenter") == 0) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } else if (attr_name.compare("screenWindowWidth") == 0) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } else if (attr_name.compare("chunkCount") == 0) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } else { // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); 
strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. 
// Convert the internal (C++) HeaderInfo into the public C EXRHeader.
// Allocates `channels`, `pixel_types` and `requested_pixel_types` with
// malloc(); the caller is expected to release them via FreeEXRHeader().
// Custom-attribute `value` pointers are copied shallowly (ownership moves
// to the EXRHeader).
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window[0] = info.display_window[0];
  exr_header->display_window[1] = info.display_window[1];
  exr_header->display_window[2] = info.display_window[2];
  exr_header->display_window[3] = info.display_window[3];
  exr_header->data_window[0] = info.data_window[0];
  exr_header->data_window[1] = info.data_window[1];
  exr_header->data_window[2] = info.data_window[2];
  exr_header->data_window[3] = info.data_window[3];
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;

  exr_header->num_channels = static_cast<int>(info.channels.size());

  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety (strncpy does not guarantee termination).
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  // Pixel types as stored in the file.
  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`; callers may overwrite these
  // afterwards to request a conversion (e.g. HALF -> FLOAT) at decode time.
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  // NOTE(review): the parser stops storing custom attributes once the table
  // holds TINYEXR_MAX_ATTRIBUTES entries, so size can legitimately equal the
  // maximum; `<` here would then fire -- should probably be `<=`. TODO confirm.
  assert(info.attributes.size() < TINYEXR_MAX_ATTRIBUTES);
  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  for (size_t i = 0; i < info.attributes.size(); i++) {
    memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256);
    memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256);
    exr_header->custom_attributes[i].size = info.attributes[i].size;
    // Just copy the pointer (shallow copy; attribute payload is not
    // duplicated here).
    exr_header->custom_attributes[i].value = info.attributes[i].value;
  }

  exr_header->header_len = info.header_len;
}

// Decode every chunk (scanline block or tile) of a single-part image into
// `exr_image`. `offsets` holds the absolute offset of each chunk relative to
// `head`, which points at the start of the EXR data in memory.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head) {
  int num_channels = exr_header->num_channels;

  // Scanlines per chunk for each compression scheme (per the OpenEXR file
  // layout: ZIP = 16, PIZ = 32; ZFP is a tinyexr extension using 16).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] -
exr_header->data_window[0] + 1;  // data window bounds are inclusive, hence +1
  int data_height =
      exr_header->data_window[3] - exr_header->data_window[1] + 1;
  size_t num_blocks = offsets.size();

  // Per-channel byte offsets within one decoded scanline record.
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                &channel_offset, num_channels,
                                exr_header->channels);

  bool invalid_data = false;

  if (exr_header->tiled) {
    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        malloc(sizeof(EXRTile) * static_cast<size_t>(num_tiles)));

    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
      exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
          num_channels, exr_header->channels, exr_header->requested_pixel_types,
          data_width, data_height);

      // Tile chunk layout:
      // 16 byte: tile coordinates
      // 4 byte : data size
      // ~      : data(uncompressed or compressed)
      const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);

      int tile_coordinates[4];
      memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));

      // @todo{ LoD } -- only mip/rip level (0, 0) is handled for now.
      assert(tile_coordinates[2] == 0);
      assert(tile_coordinates[3] == 0);

      int data_len;
      memcpy(&data_len, data_ptr + 16,
             sizeof(int));  // 16 = sizeof(tile_coordinates)
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
      assert(data_len >= 4);

      // Move to data addr: 20 = 16 + 4;
      data_ptr += 20;

      tinyexr::DecodeTiledPixelData(
          exr_image->tiles[tile_idx].images,
          &(exr_image->tiles[tile_idx].width),
          &(exr_image->tiles[tile_idx].height),
          exr_header->requested_pixel_types, data_ptr,
          static_cast<size_t>(data_len), exr_header->compression_type,
          exr_header->line_order, data_width, data_height,
          tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
          exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
          static_cast<size_t>(exr_header->num_custom_attributes),
          exr_header->custom_attributes,
          static_cast<size_t>(exr_header->num_channels), exr_header->channels,
          channel_offset_list);

      exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
      exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
      exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
      exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

      exr_image->num_tiles = static_cast<int>(num_tiles);
    }
  } else {  // scanline format
    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
      size_t y_idx = static_cast<size_t>(y);
      const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
      // Scanline chunk layout:
      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(uncompressed or compressed)
      int line_no;
      memcpy(&line_no, data_ptr, sizeof(int));
      int data_len;
      memcpy(&data_len, data_ptr + 4, sizeof(int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

      // Clamp the block's last scanline to the bottom of the data window.
      int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                   (exr_header->data_window[3] + 1));

      int num_lines = end_line_no - line_no;
      assert(num_lines > 0);

      // Move to data addr: 8 = 4 + 4;
      data_ptr += 8;

      // Adjust line_no with data_window.bmin.y
      line_no -= exr_header->data_window[1];

      if (line_no < 0) {
        invalid_data = true;
      } else {
        tinyexr::DecodePixelData(
            exr_image->images, exr_header->requested_pixel_types, data_ptr,
            static_cast<size_t>(data_len), exr_header->compression_type,
            exr_header->line_order, data_width, data_height, data_width, y,
            line_no, num_lines, static_cast<size_t>(pixel_data_size),
            static_cast<size_t>(exr_header->num_custom_attributes),
            exr_header->custom_attributes,
            static_cast<size_t>(exr_header->num_channels),
            exr_header->channels, channel_offset_list);
      }
    }  // omp parallel
  }

  if (invalid_data) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}

// Rebuild a missing/zeroed scanline offset table by walking chunks
// sequentially from `marker`, mirroring
// OpenEXR/IlmImf/ImfScanLineInputFile.cpp::reconstructLineOffsets().
// Returns false when a chunk would start outside the file data.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if (offset >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    // NOTE(review): 8 bytes are read at `marker` after only checking
    // `offset < size`; a chunk header truncated at the very end of the
    // buffer could read past it -- TODO confirm bounds.
    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // NOTE(review): `data_len` is range-checked before the byte swap below;
    // on a host where swap4 actually reorders bytes this checks the wrong
    // value -- TODO confirm intended byte order here.
    if (data_len >= size) {
      return false;
    }

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}

// Decode a single-part EXR image: read (or, if corrupt, reconstruct) the
// chunk offset table located at `marker`, then decode all chunks via
// DecodeChunk().
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanlines per chunk for each compression scheme (kept in sync with
  // DecodeChunk()).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int
data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height =
      exr_header->data_window[3] - exr_header->data_window[1] + 1;

  // Read offset tables.
  size_t num_blocks;
  if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
  } else if (exr_header->tiled) {
    // @todo { LoD } -- assumes a single tile level; round tile counts up.
    size_t num_x_tiles = static_cast<size_t>(data_width) /
                         static_cast<size_t>(exr_header->tile_size_x);
    if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
        static_cast<size_t>(data_width)) {
      num_x_tiles++;
    }
    size_t num_y_tiles = static_cast<size_t>(data_height) /
                         static_cast<size_t>(exr_header->tile_size_y);
    if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
        static_cast<size_t>(data_height)) {
      num_y_tiles++;
    }

    num_blocks = num_x_tiles * num_y_tiles;
  } else {
    // Scanline image: ceil(height / scanlines-per-chunk) chunks.
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
  }

  std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);

  for (size_t y = 0; y < num_blocks; y++) {
    tinyexr::tinyexr_uint64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);
    if (offset >= size) {
      if (err) {
        (*err) = "Invalid offset value.";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct it.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for
  // details.
  // NOTE(review): tinyexr_uint64 appears to be unsigned, so `<= 0` below is
  // effectively `== 0` -- TODO confirm.
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] <= 0) {
      // TODO(syoyo) Report as warning?
      // if (err) {
      //   stringstream ss;
      //   ss << "Incomplete lineOffsets." << std::endl;
      //   (*err) += ss.str();
      //}
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK
        break;
      } else {
        if (err) {
          (*err) = "Cannot reconstruct lineOffset table.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  return DecodeChunk(exr_image, exr_header, offsets, head);
}

}  // namespace tinyexr

// Convenience loader: read an EXR file from disk and return its pixels as a
// tightly packed RGBA float buffer (malloc'ed into *out_rgba; caller frees).
// HALF channels are converted to FLOAT. Returns TINYEXR_SUCCESS or a
// TINYEXR_ERROR_* code, with an optional message in *err.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      if (err) {
        (*err) = "Loading multipart or DeepImage is not supported yet.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }
  }

  // Locate the R/G/B/A channels by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // A sole "A" channel necessarily sits at index 0, hence the idxA == 0 test.
  if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) {
    // Alpha channel only.
    // Replicate the alpha value into all four output components.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    for (int i = 0; i < exr_image.width * exr_image.height; i++) {
      const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
      (*out_rgba)[4 * i + 0] = val;
      (*out_rgba)[4 * i + 1] = val;
      (*out_rgba)[4 * i + 2] = val;
      (*out_rgba)[4 * i + 3] = val;
    }
  } else {
    // Assume RGB(A)
    if (idxR == -1) {
      if (err) {
        (*err) = "R channel not found\n";
      }

      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      if (err) {
        (*err) = "G channel not found\n";
      }
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      if (err) {
        (*err) = "B channel not found\n";
      }
      // @todo { free exr_image }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    // Interleave planar channel data into packed RGBA; alpha defaults to 1.0
    // when no "A" channel is present.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    for (int i = 0; i < exr_image.width * exr_image.height; i++) {
      (*out_rgba)[4 * i + 0] =
          reinterpret_cast<float **>(exr_image.images)[idxR][i];
      (*out_rgba)[4 * i + 1] =
          reinterpret_cast<float **>(exr_image.images)[idxG][i];
      (*out_rgba)[4 * i + 2] =
          reinterpret_cast<float **>(exr_image.images)[idxB][i];
      if (idxA != -1) {
        (*out_rgba)[4 * i + 3] =
            reinterpret_cast<float **>(exr_image.images)[idxA][i];
      } else {
        (*out_rgba)[4 * i + 3] = 1.0;
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

// Parse the EXR header that follows the version block in `memory` into
// `exr_header` (via the internal HeaderInfo + ConvertHeader). On failure a
// strdup'ed message may be stored in *err (caller frees; may leak).
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Skip the magic number + version block at the start of the buffer.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
#ifdef _WIN32
      (*err) = _strdup(err_str.c_str());  // May leak
#else
      (*err) = strdup(err_str.c_str());  // May leak
#endif
    }
  }

  ConvertHeader(exr_header, info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return ret;
}

// In-memory counterpart of LoadEXR(): decode an EXR image held entirely in
// `memory` into a packed RGBA float buffer (HALF read as FLOAT).
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // Locate the R/G/B/A channels by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  if (idxR == -1) {
    if (err) {
      (*err) = "R channel not found\n";
    }

    // @todo { free exr_image }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  if (idxG == -1) {
    if (err) {
      (*err) = "G channel not found\n";
    }
    // @todo { free exr_image }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  if (idxB == -1) {
    if (err) {
      (*err) = "B channel not found\n";
    }
    // @todo { free exr_image }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Interleave planar channel data into packed RGBA; alpha defaults to 1.0
  // when no "A" channel is present.
  (*out_rgba) = reinterpret_cast<float *>(
      malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
             static_cast<size_t>(exr_image.height)));

  for (int i = 0; i < exr_image.width * exr_image.height; i++) {
    (*out_rgba)[4 * i + 0] =
        reinterpret_cast<float **>(exr_image.images)[idxR][i];
    (*out_rgba)[4 * i + 1] =
        reinterpret_cast<float **>(exr_image.images)[idxG][i];
    (*out_rgba)[4 * i + 2] =
        reinterpret_cast<float **>(exr_image.images)[idxB][i];
    if (idxA != -1) {
      (*out_rgba)[4 * i + 3] =
          reinterpret_cast<float **>(exr_image.images)[idxA][i];
    } else {
      (*out_rgba)[4 * i + 3] = 1.0;
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

// Read the whole file into memory and decode it via LoadEXRImageFromMemory().
// `exr_header` must already be populated (e.g. by ParseEXRHeaderFromFile).
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  // NOTE(review): fseek/ftell results are unchecked, and a zero-byte file
  // would make `buf` empty so `&buf[0]` below is invalid -- TODO confirm
  // handling of empty/unseekable files.
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}

// Decode an EXR image from an in-memory buffer. `exr_header` must have been
// parsed first (header_len != 0); the chunk data is located by skipping the
// magic number, version block and header.
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  if (exr_image == NULL || memory == NULL ||
      (size < tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->header_len == 0) {
    if (err) {
      (*err) = "EXRHeader is not initialized.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  const unsigned char *head = memory;
  const unsigned char *marker = reinterpret_cast<const unsigned char *>(
      memory + exr_header->header_len +
      8);  // +8 for magic number + version header.
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                                 err);
}

// Serialize `exr_image` into a freshly malloc'ed EXR byte stream stored in
// *memory_out. Returns the byte size written, or 0 on error (with an
// optional message in *err).
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
                            const EXRHeader *exr_header,
                            unsigned char **memory_out, const char **err) {
  if (exr_image == NULL || memory_out == NULL ||
      exr_header->compression_type < 0) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return 0;  // @fixme
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    if (err) {
      (*err) = "PIZ compression is not supported in this build.";
    }
    return 0;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    if (err) {
      (*err) = "ZFP compression is not supported in this build.";
    }
    return 0;
  }
#endif

#if TINYEXR_USE_ZFP
  for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
    if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
      if (err) {
        (*err) = "Pixel type must be FLOAT for ZFP compression.";
      }
      return 0;
    }
  }
#endif

  std::vector<unsigned char> memory;

  // Header
  {
    // EXR magic number: 0x76, 0x2f, 0x31, 0x01.
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    memory.insert(memory.end(), header, header + 4);
  }

  // Version, scanline.
  {
    char marker[] = {2, 0, 0, 0};
    /* @todo
    if (exr_header->tiled) {
      marker[1] |= 0x2;
    }
    if (exr_header->long_name) {
      marker[1] |= 0x4;
    }
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }

    if (exr_header->multipart) {
      marker[1] |= 0x10;
    }
    */
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Scanlines per chunk for the chosen compression scheme.
  int num_scanlines = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanlines = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanlines = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanlines = 16;
  }

  // Write attributes.
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * 
sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam 
zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = f32.f; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + 
channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = h16.u; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : 
pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 1024 + static_cast<unsigned int>( 1.2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), 
reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "PIZ compression is not supported in this build."; } return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { if (err) { (*err) = "ZFP compression is not supported in this build."; } return 0; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { if (err) { (*err) = "Cannot write a file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((!errcode) || (!fp)) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t 
filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); if (err) { (*err) = "File size is zero."; } return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { if (err) { (*err) = "Invalid magic number."; } return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { if (err) { (*err) = "Unsupported version or scanline."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "Unsupported compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int tinyexr::ReadChannelInfo(channels, data); num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { if (err) { (*err) = "Invalid channels format."; } return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if (attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, 
&data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { if (err) { (*err) = "Unsupported format."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int 
*) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize)); assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize)); assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui = *reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; f16.u = *reinterpret_cast<unsigned short *>( 
&sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f = *reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { 
if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { if (err) { (*err) = "fread error."; } return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { 
tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err) { #ifdef _WIN32 (*err) = _strdup(err_str.c_str()); // may leak #else (*err) = strdup(err_str.c_str()); // may leak #endif } return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { if (err) { (*err) = "`chunkCount' attribute is not found in the header."; } return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. 
exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { if (err) { (*err) = "fread error."; } return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. 
{ // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. 
size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { if (err) { (*err) = "EXRHeader is not initialized."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { if (err) { (*err) = "Invalid offset size."; } return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { assert(0); return TINYEXR_ERROR_INVALID_DATA; } } int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory); if (ret != TINYEXR_SUCCESS) { return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename) { if ((components == 1) || components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. 
if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } const char *err; int ret = SaveEXRImageToFile(&image, &header, outfilename, &err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
GB_unaryop__abs_uint64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint64_int16
// op(A') function:  GB_tran__abs_uint64_int16

// C type:   uint64_t
// A type:   int16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    int16_t

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator.  ABS on an unsigned operand is the identity (|x| == x for
// any uint64_t x), so after the cast no further work is needed.
#define GB_OP(z, x) \
    z = x ;

// casting.  Note that a negative int16_t aij wraps modulo 2^64 here, per the
// C rules for signed-to-unsigned conversion.
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the anz entries of Ax, writing the
// results to Cx, parallelized with a static OpenMP schedule.  Returns
// GrB_NO_VALUE when the operator/type combination is compiled out.
GrB_Info GB_unop__abs_uint64_int16
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // each iteration is independent: Cx [p] = (uint64_t) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in the shared template GB_unaryop_transpose.c,
// which is specialized by the GB_* macros defined above.
GrB_Info GB_tran__abs_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
7624.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 1; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 32) for (t10 = t8; t10 <= (ny - 1 < t8 + 31 ? ny - 1 : t8 + 31); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 32) for (t10 = t8; t10 <= (ny - 1 < t8 + 31 ? ny - 1 : t8 + 31); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 2; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 2 ? t4 + 127 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 32) for (t10 = t8; t10 <= (ny - 2 < t8 + 31 ? ny - 2 : t8 + 31); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
omp_for_firstprivate.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" int sum1; #pragma omp threadprivate(sum1) int test_omp_for_firstprivate() { int sum; int sum0; int known_sum; int threadsnum; sum = 0; sum0 = 12345; sum1 = 0; #pragma omp parallel { #pragma omp single { threadsnum=omp_get_num_threads(); } /* sum0 = 0; */ int i; #pragma omp for firstprivate(sum0) for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum0 + i; sum1 = sum0; } /* end of for */ #pragma omp critical { sum = sum + sum1; } /* end of critical */ } /* end of parallel */ known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_firstprivate()) { num_failed++; } } return num_failed; }
gimple.h
/* Gimple IR definitions. Copyright 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_GIMPLE_H #define GCC_GIMPLE_H #include "pointer-set.h" #include "vec.h" #include "vecprim.h" #include "vecir.h" #include "ggc.h" #include "basic-block.h" #include "tree-ssa-operands.h" #include "tree-ssa-alias.h" #include "internal-fn.h" struct gimple_seq_node_d; typedef struct gimple_seq_node_d *gimple_seq_node; typedef const struct gimple_seq_node_d *const_gimple_seq_node; /* For each block, the PHI nodes that need to be rewritten are stored into these vectors. */ typedef VEC(gimple, heap) *gimple_vec; DEF_VEC_P (gimple_vec); DEF_VEC_ALLOC_P (gimple_vec, heap); enum gimple_code { #define DEFGSCODE(SYM, STRING, STRUCT) SYM, #include "gimple.def" #undef DEFGSCODE LAST_AND_UNUSED_GIMPLE_CODE }; extern const char *const gimple_code_name[]; extern const unsigned char gimple_rhs_class_table[]; /* Error out if a gimple tuple is addressed incorrectly. 
*/ #if defined ENABLE_GIMPLE_CHECKING #define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR) extern void gimple_check_failed (const_gimple, const char *, int, \ const char *, enum gimple_code, \ enum tree_code) ATTRIBUTE_NORETURN; #define GIMPLE_CHECK(GS, CODE) \ do { \ const_gimple __gs = (GS); \ if (gimple_code (__gs) != (CODE)) \ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \ (CODE), ERROR_MARK); \ } while (0) #else /* not ENABLE_GIMPLE_CHECKING */ #define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR))) #define GIMPLE_CHECK(GS, CODE) (void)0 #endif /* Class of GIMPLE expressions suitable for the RHS of assignments. See get_gimple_rhs_class. */ enum gimple_rhs_class { GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA name, a _DECL, a _REF, etc. */ }; /* Specific flags for individual GIMPLE statements. These flags are always stored in gimple_statement_base.subcode and they may only be defined for statement codes that do not use sub-codes. Values for the masks can overlap as long as the overlapping values are never used in the same statement class. The maximum mask value that can be defined is 1 << 15 (i.e., each statement code can hold up to 16 bitflags). Keep this list sorted. */ enum gf_mask { GF_ASM_INPUT = 1 << 0, GF_ASM_VOLATILE = 1 << 1, GF_CALL_FROM_THUNK = 1 << 0, GF_CALL_RETURN_SLOT_OPT = 1 << 1, GF_CALL_TAILCALL = 1 << 2, GF_CALL_VA_ARG_PACK = 1 << 3, GF_CALL_NOTHROW = 1 << 4, GF_CALL_ALLOCA_FOR_VAR = 1 << 5, GF_CALL_INTERNAL = 1 << 6, GF_OMP_PARALLEL_COMBINED = 1 << 0, /* True on an GIMPLE_OMP_RETURN statement if the return does not require a thread synchronization via some sort of barrier. 
The exact barrier that would otherwise be emitted is dependent on the OMP statement with which this return is associated. */ GF_OMP_RETURN_NOWAIT = 1 << 0, GF_OMP_SECTION_LAST = 1 << 0, GF_OMP_ATOMIC_NEED_VALUE = 1 << 0, GF_PREDICT_TAKEN = 1 << 15 }; /* Currently, there are only two types of gimple debug stmt. Others are envisioned, for example, to enable the generation of is_stmt notes in line number information, to mark sequence points, etc. This subcode is to be used to tell them apart. */ enum gimple_debug_subcode { GIMPLE_DEBUG_BIND = 0, GIMPLE_DEBUG_SOURCE_BIND = 1 }; /* Masks for selecting a pass local flag (PLF) to work on. These masks are used by gimple_set_plf and gimple_plf. */ enum plf_mask { GF_PLF_1 = 1 << 0, GF_PLF_2 = 1 << 1 }; /* A node in a gimple_seq_d. */ struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d { gimple stmt; struct gimple_seq_node_d *prev; struct gimple_seq_node_d *next; }; /* A double-linked sequence of gimple statements. */ struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d { /* First and last statements in the sequence. */ gimple_seq_node first; gimple_seq_node last; /* Sequences are created/destroyed frequently. To minimize allocation activity, deallocated sequences are kept in a pool of available sequences. This is the pointer to the next free sequence in the pool. */ gimple_seq next_free; }; /* Return the first node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_first (const_gimple_seq s) { return s ? s->first : NULL; } /* Return the first statement in GIMPLE sequence S. */ static inline gimple gimple_seq_first_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_first (s); return (n) ? n->stmt : NULL; } /* Return the last node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_last (const_gimple_seq s) { return s ? s->last : NULL; } /* Return the last statement in GIMPLE sequence S. 
*/ static inline gimple gimple_seq_last_stmt (const_gimple_seq s) { gimple_seq_node n = gimple_seq_last (s); return (n) ? n->stmt : NULL; } /* Set the last node in GIMPLE sequence S to LAST. */ static inline void gimple_seq_set_last (gimple_seq s, gimple_seq_node last) { s->last = last; } /* Set the first node in GIMPLE sequence S to FIRST. */ static inline void gimple_seq_set_first (gimple_seq s, gimple_seq_node first) { s->first = first; } /* Return true if GIMPLE sequence S is empty. */ static inline bool gimple_seq_empty_p (const_gimple_seq s) { return s == NULL || s->first == NULL; } void gimple_seq_add_stmt (gimple_seq *, gimple); /* Link gimple statement GS to the end of the sequence *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_stmt, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. */ void gimple_seq_add_stmt_without_update (gimple_seq *, gimple); /* Allocate a new sequence and initialize its first element with STMT. */ static inline gimple_seq gimple_seq_alloc_with_stmt (gimple stmt) { gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, stmt); return seq; } /* Returns the sequence of statements in BB. */ static inline gimple_seq bb_seq (const_basic_block bb) { return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL; } /* Sets the sequence of statements in BB to SEQ. */ static inline void set_bb_seq (basic_block bb, gimple_seq seq) { gcc_checking_assert (!(bb->flags & BB_RTL)); bb->il.gimple->seq = seq; } /* Iterator object for GIMPLE statement sequences. */ typedef struct { /* Sequence node holding the current statement. */ gimple_seq_node ptr; /* Sequence and basic block holding the statement. These fields are necessary to handle edge cases such as when statement is added to an empty basic block or when the last statement of a block/sequence is removed. 
*/ gimple_seq seq; basic_block bb; } gimple_stmt_iterator; /* Data structure definitions for GIMPLE tuples. NOTE: word markers are for 64 bit hosts. */ struct GTY(()) gimple_statement_base { /* [ WORD 1 ] Main identifying code for a tuple. */ ENUM_BITFIELD(gimple_code) code : 8; /* Nonzero if a warning should not be emitted on this tuple. */ unsigned int no_warning : 1; /* Nonzero if this tuple has been visited. Passes are responsible for clearing this bit before using it. */ unsigned int visited : 1; /* Nonzero if this tuple represents a non-temporal move. */ unsigned int nontemporal_move : 1; /* Pass local flags. These flags are free for any pass to use as they see fit. Passes should not assume that these flags contain any useful value when the pass starts. Any initial state that the pass requires should be set on entry to the pass. See gimple_set_plf and gimple_plf for usage. */ unsigned int plf : 2; /* Nonzero if this statement has been modified and needs to have its operands rescanned. */ unsigned modified : 1; /* Nonzero if this statement contains volatile operands. */ unsigned has_volatile_ops : 1; /* The SUBCODE field can be used for tuple-specific flags for tuples that do not require subcodes. Note that SUBCODE should be at least as wide as tree codes, as several tuples store tree codes in there. */ unsigned int subcode : 16; /* UID of this statement. This is used by passes that want to assign IDs to statements. It must be assigned and used by each pass. By default it should be assumed to contain garbage. */ unsigned uid; /* [ WORD 2 ] Locus information for debug info. */ location_t location; /* Number of operands in this tuple. */ unsigned num_ops; /* [ WORD 3 ] Basic block holding this statement. */ struct basic_block_def *bb; /* [ WORD 4 ] Lexical block holding this statement. */ tree block; }; /* Base structure for tuples with operands. 
*/ struct GTY(()) gimple_statement_with_ops_base { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] SSA operand vectors. NOTE: It should be possible to amalgamate these vectors with the operand vector OP. However, the SSA operand vectors are organized differently and contain more information (like immediate use chaining). */ struct def_optype_d GTY((skip (""))) *def_ops; struct use_optype_d GTY((skip (""))) *use_ops; }; /* Statements that take register operands. */ struct GTY(()) gimple_statement_with_ops { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1]; }; /* Base for statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops_base { /* [ WORD 1-6 ] */ struct gimple_statement_with_ops_base opbase; /* [ WORD 7-8 ] Virtual operands for this statement. The GC will pick them up via the ssa_names array. */ tree GTY((skip (""))) vdef; tree GTY((skip (""))) vuse; }; /* Statements that take both memory and register operands. */ struct GTY(()) gimple_statement_with_memory_ops { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* Call statements that take both memory and register operands. 
*/ struct GTY(()) gimple_statement_call { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9-12 ] */ struct pt_solution call_used; struct pt_solution call_clobbered; /* [ WORD 13 ] */ union GTY ((desc ("%1.membase.opbase.gsbase.subcode & GF_CALL_INTERNAL"))) { tree GTY ((tag ("0"))) fntype; enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn; } u; /* [ WORD 14 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* OpenMP statements (#pragma omp). */ struct GTY(()) gimple_statement_omp { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ gimple_seq body; }; /* GIMPLE_BIND */ struct GTY(()) gimple_statement_bind { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Variables declared in this scope. */ tree vars; /* [ WORD 6 ] This is different than the BLOCK field in gimple_statement_base, which is analogous to TREE_BLOCK (i.e., the lexical block holding this statement). This field is the equivalent of BIND_EXPR_BLOCK in tree land (i.e., the lexical scope defined by this bind). See gimple-low.c. */ tree block; /* [ WORD 7 ] */ gimple_seq body; }; /* GIMPLE_CATCH */ struct GTY(()) gimple_statement_catch { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree types; /* [ WORD 6 ] */ gimple_seq handler; }; /* GIMPLE_EH_FILTER */ struct GTY(()) gimple_statement_eh_filter { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Filter types. */ tree types; /* [ WORD 6 ] Failure actions. 
*/ gimple_seq failure; }; /* GIMPLE_EH_ELSE */ struct GTY(()) gimple_statement_eh_else { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5,6 ] */ gimple_seq n_body, e_body; }; /* GIMPLE_EH_MUST_NOT_THROW */ struct GTY(()) gimple_statement_eh_mnt { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Abort function decl. */ tree fndecl; }; /* GIMPLE_PHI */ struct GTY(()) gimple_statement_phi { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ unsigned capacity; unsigned nargs; /* [ WORD 6 ] */ tree result; /* [ WORD 7 ] */ struct phi_arg_d GTY ((length ("%h.nargs"))) args[1]; }; /* GIMPLE_RESX, GIMPLE_EH_DISPATCH */ struct GTY(()) gimple_statement_eh_ctrl { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Exception region number. */ int region; }; /* GIMPLE_TRY */ struct GTY(()) gimple_statement_try { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] Expression to evaluate. */ gimple_seq eval; /* [ WORD 6 ] Cleanup expression. */ gimple_seq cleanup; }; /* Kind of GIMPLE_TRY statements. */ enum gimple_try_flags { /* A try/catch. */ GIMPLE_TRY_CATCH = 1 << 0, /* A try/finally. */ GIMPLE_TRY_FINALLY = 1 << 1, GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY, /* Analogous to TRY_CATCH_IS_CLEANUP. */ GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2 }; /* GIMPLE_WITH_CLEANUP_EXPR */ struct GTY(()) gimple_statement_wce { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be executed if an exception is thrown, not on normal exit of its scope. This flag is analogous to the CLEANUP_EH_ONLY flag in TARGET_EXPRs. */ /* [ WORD 5 ] Cleanup expression. */ gimple_seq cleanup; }; /* GIMPLE_ASM */ struct GTY(()) gimple_statement_asm { /* [ WORD 1-8 ] */ struct gimple_statement_with_memory_ops_base membase; /* [ WORD 9 ] __asm__ statement. */ const char *string; /* [ WORD 10 ] Number of inputs, outputs, clobbers, labels. 
*/ unsigned char ni; unsigned char no; unsigned char nc; unsigned char nl; /* [ WORD 11 ] Operand vector. NOTE! This must always be the last field of this structure. In particular, this means that this structure cannot be embedded inside another one. */ tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1]; }; /* GIMPLE_OMP_CRITICAL */ struct GTY(()) gimple_statement_omp_critical { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Critical section name. */ tree name; }; struct GTY(()) gimple_omp_for_iter { /* Condition code. */ enum tree_code cond; /* Index variable. */ tree index; /* Initial value. */ tree initial; /* Final value. */ tree final; /* Increment. */ tree incr; }; /* GIMPLE_OMP_FOR */ struct GTY(()) gimple_statement_omp_for { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] Number of elements in iter array. */ size_t collapse; /* [ WORD 8 ] */ struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter; /* [ WORD 9 ] Pre-body evaluated before the loop body begins. */ gimple_seq pre_body; }; /* GIMPLE_OMP_PARALLEL */ struct GTY(()) gimple_statement_omp_parallel { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] Clauses. */ tree clauses; /* [ WORD 7 ] Child function holding the body of the parallel region. */ tree child_fn; /* [ WORD 8 ] Shared data argument. */ tree data_arg; }; /* GIMPLE_OMP_TASK */ struct GTY(()) gimple_statement_omp_task { /* [ WORD 1-8 ] */ struct gimple_statement_omp_parallel par; /* [ WORD 9 ] Child function holding firstprivate initialization if needed. */ tree copy_fn; /* [ WORD 10-11 ] Size and alignment in bytes of the argument data block. */ tree arg_size; tree arg_align; }; /* GIMPLE_OMP_SECTION */ /* Uses struct gimple_statement_omp. 
*/ /* GIMPLE_OMP_SECTIONS */ struct GTY(()) gimple_statement_omp_sections { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; /* [ WORD 7 ] The control variable used for deciding which of the sections to execute. */ tree control; }; /* GIMPLE_OMP_CONTINUE. Note: This does not inherit from gimple_statement_omp, because we do not need the body field. */ struct GTY(()) gimple_statement_omp_continue { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree control_def; /* [ WORD 6 ] */ tree control_use; }; /* GIMPLE_OMP_SINGLE */ struct GTY(()) gimple_statement_omp_single { /* [ WORD 1-5 ] */ struct gimple_statement_omp omp; /* [ WORD 6 ] */ tree clauses; }; /* GIMPLE_OMP_ATOMIC_LOAD. Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp contains a sequence, which we don't need here. */ struct GTY(()) gimple_statement_omp_atomic_load { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5-6 ] */ tree rhs, lhs; }; /* GIMPLE_OMP_ATOMIC_STORE. See note on GIMPLE_OMP_ATOMIC_LOAD. */ struct GTY(()) gimple_statement_omp_atomic_store { /* [ WORD 1-4 ] */ struct gimple_statement_base gsbase; /* [ WORD 5 ] */ tree val; }; /* GIMPLE_TRANSACTION. */ /* Bits to be stored in the GIMPLE_TRANSACTION subcode. */ /* The __transaction_atomic was declared [[outer]] or it is __transaction_relaxed. */ #define GTMA_IS_OUTER (1u << 0) #define GTMA_IS_RELAXED (1u << 1) #define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED) /* The transaction is seen to not have an abort. */ #define GTMA_HAVE_ABORT (1u << 2) /* The transaction is seen to have loads or stores. */ #define GTMA_HAVE_LOAD (1u << 3) #define GTMA_HAVE_STORE (1u << 4) /* The transaction MAY enter serial irrevocable mode in its dynamic scope. */ #define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5) /* The transaction WILL enter serial irrevocable mode. 
An irrevocable block post-dominates the entire transaction, such that all invocations of the transaction will go serial-irrevocable. In such case, we don't bother instrumenting the transaction, and tell the runtime that it should begin the transaction in serial-irrevocable mode. */ #define GTMA_DOES_GO_IRREVOCABLE (1u << 6) struct GTY(()) gimple_statement_transaction { /* [ WORD 1-10 ] */ struct gimple_statement_with_memory_ops_base gsbase; /* [ WORD 11 ] */ gimple_seq body; /* [ WORD 12 ] */ tree label; }; #define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM, enum gimple_statement_structure_enum { #include "gsstruct.def" LAST_GSS_ENUM }; #undef DEFGSSTRUCT /* Define the overall contents of a gimple tuple. It may be any of the structures declared above for various types of tuples. */ union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_statement_d { struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase; struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops; struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase; struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem; struct gimple_statement_call GTY ((tag ("GSS_CALL"))) gimple_call; struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp; struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind; struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch; struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter; struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt; struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else; struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi; struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl; struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try; struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce; struct gimple_statement_asm GTY ((tag 
("GSS_ASM"))) gimple_asm; struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical; struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for; struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel; struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task; struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections; struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single; struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue; struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load; struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store; struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction; }; /* In gimple.c. */ /* Offset in bytes to the location of the operand vector. Zero if there is no operand vector for this tuple structure. */ extern size_t const gimple_ops_offset_[]; /* Map GIMPLE codes to GSS codes. */ extern enum gimple_statement_structure_enum const gss_for_code_[]; /* This variable holds the currently expanded gimple statement for purposes of comminucating the profile info to the builtin expanders. 
*/ extern gimple currently_expanding_gimple_stmt; gimple gimple_build_return (tree); gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL); #define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO) void extract_ops_from_tree_1 (tree, enum tree_code *, tree *, tree *, tree *); gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree, tree, tree MEM_STAT_DECL); #define gimple_build_assign_with_ops(c,o1,o2,o3) \ gimple_build_assign_with_ops_stat (c, o1, o2, o3, NULL_TREE MEM_STAT_INFO) #define gimple_build_assign_with_ops3(c,o1,o2,o3,o4) \ gimple_build_assign_with_ops_stat (c, o1, o2, o3, o4 MEM_STAT_INFO) gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define gimple_build_debug_bind(var,val,stmt) \ gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define gimple_build_debug_source_bind(var,val,stmt) \ gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_call_vec (tree, VEC(tree, heap) *); gimple gimple_build_call (tree, unsigned, ...); gimple gimple_build_call_valist (tree, unsigned, va_list); gimple gimple_build_call_internal (enum internal_fn, unsigned, ...); gimple gimple_build_call_internal_vec (enum internal_fn, VEC(tree, heap) *); gimple gimple_build_call_from_tree (tree); gimple gimplify_assign (tree, tree, gimple_seq *); gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree); gimple gimple_build_label (tree label); gimple gimple_build_goto (tree dest); gimple gimple_build_nop (void); gimple gimple_build_bind (tree, gimple_seq, tree); gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *, VEC(tree,gc) *, VEC(tree,gc) *); gimple gimple_build_catch (tree, gimple_seq); gimple gimple_build_eh_filter (tree, gimple_seq); gimple gimple_build_eh_must_not_throw (tree); gimple gimple_build_eh_else (gimple_seq, gimple_seq); gimple 
gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags); gimple gimple_build_wce (gimple_seq); gimple gimple_build_resx (int); gimple gimple_build_eh_dispatch (int); gimple gimple_build_switch_nlabels (unsigned, tree, tree); gimple gimple_build_switch (unsigned, tree, tree, ...); gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *); gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree); gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree); gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq); gimple gimple_build_omp_critical (gimple_seq, tree); gimple gimple_build_omp_section (gimple_seq); gimple gimple_build_omp_continue (tree, tree); gimple gimple_build_omp_master (gimple_seq); gimple gimple_build_omp_return (bool); gimple gimple_build_omp_ordered (gimple_seq); gimple gimple_build_omp_sections (gimple_seq, tree); gimple gimple_build_omp_sections_switch (void); gimple gimple_build_omp_single (gimple_seq, tree); gimple gimple_build_cdt (tree, tree); gimple gimple_build_omp_atomic_load (tree, tree); gimple gimple_build_omp_atomic_store (tree); gimple gimple_build_transaction (gimple_seq, tree); gimple gimple_build_predict (enum br_predictor, enum prediction); enum gimple_statement_structure_enum gss_for_assign (enum tree_code); void sort_case_labels (VEC(tree,heap) *); void gimple_set_body (tree, gimple_seq); gimple_seq gimple_body (tree); bool gimple_has_body_p (tree); gimple_seq gimple_seq_alloc (void); void gimple_seq_free (gimple_seq); void gimple_seq_add_seq (gimple_seq *, gimple_seq); gimple_seq gimple_seq_copy (gimple_seq); bool gimple_call_same_target_p (const_gimple, const_gimple); int gimple_call_flags (const_gimple); int gimple_call_return_flags (const_gimple); int gimple_call_arg_flags (const_gimple, unsigned); void gimple_call_reset_alias_info (gimple); bool gimple_assign_copy_p (gimple); bool gimple_assign_ssa_name_copy_p (gimple); bool gimple_assign_unary_nop_p (gimple); void 
gimple_set_bb (gimple, struct basic_block_def *); void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree); void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code, tree, tree, tree); tree gimple_get_lhs (const_gimple); void gimple_set_lhs (gimple, tree); void gimple_replace_lhs (gimple, tree); gimple gimple_copy (gimple); void gimple_set_modified (gimple, bool); void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *); gimple gimple_build_cond_from_tree (tree, tree, tree); void gimple_cond_set_condition_from_tree (gimple, tree); bool gimple_has_side_effects (const_gimple); bool gimple_could_trap_p (gimple); bool gimple_could_trap_p_1 (gimple, bool, bool); bool gimple_assign_rhs_could_trap_p (gimple); void gimple_regimplify_operands (gimple, gimple_stmt_iterator *); bool empty_body_p (gimple_seq); unsigned get_gimple_rhs_num_ops (enum tree_code); #define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO) gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL); const char *gimple_decl_printable_name (tree, int); tree gimple_get_virt_method_for_binfo (HOST_WIDE_INT, tree); void gimple_adjust_this_by_delta (gimple_stmt_iterator *, tree); tree gimple_extract_devirt_binfo_from_cst (tree); /* Returns true iff T is a valid GIMPLE statement. */ extern bool is_gimple_stmt (tree); /* Returns true iff T is a scalar register variable. */ extern bool is_gimple_reg (tree); /* Returns true iff T is any sort of variable. */ extern bool is_gimple_variable (tree); /* Returns true iff T is any sort of symbol. */ extern bool is_gimple_id (tree); /* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */ extern bool is_gimple_min_lval (tree); /* Returns true iff T is something whose address can be taken. */ extern bool is_gimple_addressable (tree); /* Returns true iff T is any valid GIMPLE lvalue. */ extern bool is_gimple_lvalue (tree); /* Returns true iff T is a GIMPLE address. 
*/
bool is_gimple_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address.  */
bool is_gimple_invariant_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address at interprocedural
   level.  */
bool is_gimple_ip_invariant_address (const_tree);
/* Returns true iff T is a valid GIMPLE constant.  */
bool is_gimple_constant (const_tree);
/* Returns true iff T is a GIMPLE restricted function invariant.  */
extern bool is_gimple_min_invariant (const_tree);
/* Returns true iff T is a GIMPLE restricted interprocedural invariant.  */
extern bool is_gimple_ip_invariant (const_tree);
/* Returns true iff T is a GIMPLE rvalue.  */
extern bool is_gimple_val (tree);
/* Returns true iff T is a GIMPLE asm statement input.  */
extern bool is_gimple_asm_val (tree);
/* Returns true iff T is a valid address operand of a MEM_REF.  */
bool is_gimple_mem_ref_addr (tree);
/* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a
   GIMPLE temporary, a renamed user variable, or something else,
   respectively.  */
extern bool is_gimple_reg_rhs (tree);
extern bool is_gimple_mem_rhs (tree);
/* Returns true iff T is a valid if-statement condition.  */
extern bool is_gimple_condexpr (tree);
/* Returns true iff T is a valid call address expression.
*/ extern bool is_gimple_call_addr (tree); extern void recalculate_side_effects (tree); extern bool gimple_compare_field_offset (tree, tree); extern tree gimple_register_type (tree); extern tree gimple_register_canonical_type (tree); extern void print_gimple_types_stats (void); extern void free_gimple_type_tables (void); extern tree gimple_unsigned_type (tree); extern tree gimple_signed_type (tree); extern alias_set_type gimple_get_alias_set (tree); extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *, unsigned *); extern bool walk_stmt_load_store_addr_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool walk_stmt_load_store_ops (gimple, void *, bool (*)(gimple, tree, void *), bool (*)(gimple, tree, void *)); extern bool gimple_ior_addresses_taken (bitmap, gimple); extern bool gimple_call_builtin_p (gimple, enum built_in_function); extern bool gimple_asm_clobbers_memory_p (const_gimple); /* In gimplify.c */ extern tree create_tmp_var_raw (tree, const char *); extern tree create_tmp_var_name (const char *); extern tree create_tmp_var (tree, const char *); extern tree create_tmp_reg (tree, const char *); extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *); extern tree get_formal_tmp_var (tree, gimple_seq *); extern void declare_vars (tree, gimple, bool); extern void annotate_all_with_location (gimple_seq, location_t); /* Validation of GIMPLE expressions. Note that these predicates only check the basic form of the expression, they don't recurse to make sure that underlying nodes are also of the right form. */ typedef bool (*gimple_predicate)(tree); /* FIXME we should deduce this from the predicate. */ enum fallback { fb_none = 0, /* Do not generate a temporary. */ fb_rvalue = 1, /* Generate an rvalue to hold the result of a gimplified expression. */ fb_lvalue = 2, /* Generate an lvalue to hold the result of a gimplified expression. 
*/
  fb_mayfail = 4,	/* Gimplification may fail.  Error issued
			   afterwards.  */
  fb_either = fb_rvalue | fb_lvalue
};

typedef int fallback_t;

/* Result codes returned by the gimplifier when lowering an expression.  */
enum gimplify_status {
  GS_ERROR	= -2,	/* Something Bad Seen.  */
  GS_UNHANDLED	= -1,	/* A langhook result for "I dunno".  */
  GS_OK		= 0,	/* We did something, maybe more to do.  */
  GS_ALL_DONE	= 1	/* The expression is fully gimplified.  */
};

/* Per-context state used by the gimplifier.  Contexts nest (see
   PREV_CONTEXT), one per function being gimplified.  */
struct gimplify_ctx
{
  struct gimplify_ctx *prev_context;

  VEC(gimple,heap) *bind_expr_stack;
  tree temps;
  gimple_seq conditional_cleanups;
  tree exit_label;
  tree return_temp;

  VEC(tree,heap) *case_labels;
  /* The formal temporary table.  Should this be persistent?  */
  htab_t temp_htab;

  int conditions;
  bool save_stack;
  bool into_ssa;
  bool allow_rhs_cond_expr;
  bool in_cleanup_point_expr;
};

extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
					   bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);

/* Miscellaneous helpers.  */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern VEC(gimple, heap) *gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
						  gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);

/* In omp-low.c.  */
extern tree omp_reduction_init (tree, tree);

/* In trans-mem.c.
*/
extern void diagnose_tm_safe_errors (tree);
extern void compute_transaction_bits (void);

/* In tree-nested.c.  */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);

/* In gimplify.c.  */
extern void gimplify_function_tree (tree);

/* In cfgexpand.c.  */
extern tree gimple_assign_rhs_to_tree (gimple);

/* In builtins.c.  */
extern bool validate_gimple_arglist (const_gimple, ...);

/* In tree-ssa.c.  */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);

/* Return the code for GIMPLE statement G.  */

static inline enum gimple_code
gimple_code (const_gimple g)
{
  return g->gsbase.code;
}

/* Return the GSS code used by a GIMPLE code.  The mapping is kept in
   the table gss_for_code_, indexed by statement code.  */

static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
  gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
  return gss_for_code_[code];
}

/* Return which GSS code is used by GS.  */

static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
  return gss_for_code (gimple_code (gs));
}

/* Return true if statement G has sub-statements.  This is only true for
   High GIMPLE statements (i.e. before lowering).  */

static inline bool
gimple_has_substatements (gimple g)
{
  switch (gimple_code (g))
    {
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_EH_ELSE:
    case GIMPLE_TRY:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_WITH_CLEANUP_EXPR:
    case GIMPLE_TRANSACTION:
      return true;

    default:
      return false;
    }
}


/* Return the basic block holding statement G.
*/

static inline struct basic_block_def *
gimple_bb (const_gimple g)
{
  return g->gsbase.bb;
}

/* Return the lexical scope block holding statement G.  */

static inline tree
gimple_block (const_gimple g)
{
  return g->gsbase.block;
}

/* Set BLOCK to be the lexical scope block holding statement G.  */

static inline void
gimple_set_block (gimple g, tree block)
{
  g->gsbase.block = block;
}

/* Return location information for statement G.  */

static inline location_t
gimple_location (const_gimple g)
{
  return g->gsbase.location;
}

/* Return pointer to location information for statement G.  */

static inline const location_t *
gimple_location_ptr (const_gimple g)
{
  return &g->gsbase.location;
}

/* Set location information for statement G.  */

static inline void
gimple_set_location (gimple g, location_t location)
{
  g->gsbase.location = location;
}

/* Return true if G contains location information.  */

static inline bool
gimple_has_location (const_gimple g)
{
  return gimple_location (g) != UNKNOWN_LOCATION;
}

/* Return the file name of the location of STMT.  */

static inline const char *
gimple_filename (const_gimple stmt)
{
  return LOCATION_FILE (gimple_location (stmt));
}

/* Return the line number of the location of STMT.  */

static inline int
gimple_lineno (const_gimple stmt)
{
  return LOCATION_LINE (gimple_location (stmt));
}

/* Determine whether SEQ is a non-empty sequence containing exactly one
   statement.  */

static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
  return ((gimple_seq_first (seq) != NULL)
	  && (gimple_seq_first (seq) == gimple_seq_last (seq)));
}

/* Return true if no warnings should be emitted for statement STMT.  */

static inline bool
gimple_no_warning_p (const_gimple stmt)
{
  return stmt->gsbase.no_warning;
}

/* Set the no_warning flag of STMT to NO_WARNING.  */

static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
  stmt->gsbase.no_warning = (unsigned) no_warning;
}

/* Set the visited status on statement STMT to VISITED_P.
*/

static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
  stmt->gsbase.visited = (unsigned) visited_p;
}

/* Return the visited status for statement STMT.  */

static inline bool
gimple_visited_p (gimple stmt)
{
  return stmt->gsbase.visited;
}

/* Set pass local flag PLF on statement STMT to VAL_P.  PLF is a bit
   mask, so several flags may be set or cleared at once.  */

static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  if (val_p)
    stmt->gsbase.plf |= (unsigned int) plf;
  else
    stmt->gsbase.plf &= ~((unsigned int) plf);
}

/* Return the value of pass local flag PLF on statement STMT.  */

static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
  return stmt->gsbase.plf & ((unsigned int) plf);
}

/* Set the UID of statement.  */

static inline void
gimple_set_uid (gimple g, unsigned uid)
{
  g->gsbase.uid = uid;
}

/* Return the UID of statement.  */

static inline unsigned
gimple_uid (const_gimple g)
{
  return g->gsbase.uid;
}

/* Return true if GIMPLE statement G has register or memory operands.
   Relies on the code ordering GIMPLE_COND .. GIMPLE_RETURN in the
   gimple_code enum.  */

static inline bool
gimple_has_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}

/* Return true if GIMPLE statement G has memory operands.  */

static inline bool
gimple_has_mem_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}

/* Return the set of DEF operands for statement G.  */

static inline struct def_optype_d *
gimple_def_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.def_ops;
}

/* Set DEF to be the set of DEF operands for statement G.  */

static inline void
gimple_set_def_ops (gimple g, struct def_optype_d *def)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.def_ops = def;
}

/* Return the set of USE operands for statement G.  */

static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  if (!gimple_has_ops (g))
    return NULL;
  return g->gsops.opbase.use_ops;
}

/* Set USE to be the set of USE operands for statement G.
*/

static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gcc_gimple_checking_assert (gimple_has_ops (g));
  g->gsops.opbase.use_ops = use;
}

/* Return the set of VUSE operand for statement G.  The virtual use, if
   present, is always the first entry in the use operand list and is
   recognized by pointer identity with the vuse slot.  */

static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
  struct use_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_USE_OPERAND_P;
  ops = g->gsops.opbase.use_ops;
  if (ops
      && USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
    return USE_OP_PTR (ops);
  return NULL_USE_OPERAND_P;
}

/* Return the set of VDEF operand for statement G.  */

static inline def_operand_p
gimple_vdef_op (const_gimple g)
{
  struct def_optype_d *ops;
  if (!gimple_has_mem_ops (g))
    return NULL_DEF_OPERAND_P;
  ops = g->gsops.opbase.def_ops;
  if (ops
      && DEF_OP_PTR (ops) == &g->gsmembase.vdef)
    return DEF_OP_PTR (ops);
  return NULL_DEF_OPERAND_P;
}

/* Return the single VUSE operand of the statement G.  */

static inline tree
gimple_vuse (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vuse;
}

/* Return the single VDEF operand of the statement G.  */

static inline tree
gimple_vdef (const_gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL_TREE;
  return g->gsmembase.vdef;
}

/* Return a pointer to the single VUSE operand of the statement G.  */

static inline tree *
gimple_vuse_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vuse;
}

/* Return a pointer to the single VDEF operand of the statement G.  */

static inline tree *
gimple_vdef_ptr (gimple g)
{
  if (!gimple_has_mem_ops (g))
    return NULL;
  return &g->gsmembase.vdef;
}

/* Set the single VUSE operand of the statement G.  */

static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vuse = vuse;
}

/* Set the single VDEF operand of the statement G.
*/

static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gcc_gimple_checking_assert (gimple_has_mem_ops (g));
  g->gsmembase.vdef = vdef;
}

/* Return true if statement G has operands and the modified field has
   been set.  */

static inline bool
gimple_modified_p (const_gimple g)
{
  return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}

/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */

static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
    return (enum tree_code) stmt->gsbase.subcode;
  else
    {
      gcc_gimple_checking_assert (code == GIMPLE_CALL);
      return CALL_EXPR;
    }
}

/* Mark statement S as modified, and update it.  No-op for statements
   without operands.  */

static inline void
update_stmt (gimple s)
{
  if (gimple_has_ops (s))
    {
      gimple_set_modified (s, true);
      update_stmt_operands (s);
    }
}

/* Update statement S if it has been optimized.  */

static inline void
update_stmt_if_modified (gimple s)
{
  if (gimple_modified_p (s))
    update_stmt_operands (s);
}

/* Return true if statement STMT contains volatile operands.  */

static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->gsbase.has_volatile_ops;
  else
    return false;
}

/* Set the HAS_VOLATILE_OPS flag to VOLATILEP.  Silently ignored for
   statements without memory operands.  */

static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}

/* Return true if BB is in a transaction.  */

static inline bool
block_in_transaction (basic_block bb)
{
  return flag_tm && bb->flags & BB_IN_TRANSACTION;
}

/* Return true if STMT is in a transaction.
*/

static inline bool
gimple_in_transaction (gimple stmt)
{
  return block_in_transaction (gimple_bb (stmt));
}

/* Return true if statement STMT may access memory.  */

static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}


/* Return the subcode for OMP statement S.  Relies on the OMP codes
   being contiguous from GIMPLE_OMP_ATOMIC_LOAD to GIMPLE_OMP_SINGLE.  */

static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
	      && gimple_code (s) <= GIMPLE_OMP_SINGLE);
  return s->gsbase.subcode;
}

/* Set the subcode for OMP statement S to SUBCODE.  */

static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->gsbase.subcode = subcode;
}

/* Set the nowait flag on OMP_RETURN statement S.  */

static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}


/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */

static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}


/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */

static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}


/* Set the GF_OMP_SECTION_LAST flag on G.  */

static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}


/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.
*/

static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}


/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */

static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}


/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_NEED_VALUE flag set.  Accepts either
   GIMPLE_OMP_ATOMIC_LOAD or GIMPLE_OMP_ATOMIC_STORE.  */

static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}


/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G.  */

static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->gsbase.subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}


/* Return the number of operands for statement GS.  */

static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->gsbase.num_ops;
}


/* Set the number of operands for statement GS.  */

static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->gsbase.num_ops = num_ops;
}


/* Return the array of operands for statement GS.  */

static inline tree *
gimple_ops (gimple gs)
{
  size_t off;

  /* All the tuples have their operand vector at the very bottom
     of the structure.  Note that those structures that do not
     have an operand vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_gimple_checking_assert (off != 0);

  return (tree *) ((char *) gs + off);
}


/* Return operand I for statement GS.
*/ static inline tree gimple_op (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs))[i]; } else return NULL_TREE; } /* Return a pointer to operand I for statement GS. */ static inline tree * gimple_op_ptr (const_gimple gs, unsigned i) { if (gimple_has_ops (gs)) { gcc_gimple_checking_assert (i < gimple_num_ops (gs)); return gimple_ops (CONST_CAST_GIMPLE (gs)) + i; } else return NULL; } /* Set operand I of statement GS to OP. */ static inline void gimple_set_op (gimple gs, unsigned i, tree op) { gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs)); /* Note. It may be tempting to assert that OP matches is_gimple_operand, but that would be wrong. Different tuples accept slightly different sets of tree operands. Each caller should perform its own validation. */ gimple_ops (gs)[i] = op; } /* Return true if GS is a GIMPLE_ASSIGN. */ static inline bool is_gimple_assign (const_gimple gs) { return gimple_code (gs) == GIMPLE_ASSIGN; } /* Determine if expression CODE is one of the valid expressions that can be used on the RHS of GIMPLE assignments. */ static inline enum gimple_rhs_class get_gimple_rhs_class (enum tree_code code) { return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code]; } /* Return the LHS of assignment statement GS. */ static inline tree gimple_assign_lhs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op (gs, 0); } /* Return a pointer to the LHS of assignment statement GS. */ static inline tree * gimple_assign_lhs_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASSIGN); return gimple_op_ptr (gs, 0); } /* Set LHS to be the LHS operand of assignment statement GS. 
*/

static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);

  /* An SSA name's defining statement is this assignment now.  */
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}


/* Return the first operand on the RHS of assignment statement GS.  */

static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}


/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */

static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the first operand on the RHS of assignment statement GS.  */

static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}


/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have a second operand, NULL is returned instead.  */

static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}


/* Return a pointer to the second operand on the RHS of assignment
   statement GS.  */

static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}


/* Set RHS to be the second operand on the RHS of assignment statement GS.  */

static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}

/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have a third operand, NULL is returned instead.  */

static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}

/* Return a pointer to the third operand on the RHS of assignment
   statement GS.
*/

static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}


/* Set RHS to be the third operand on the RHS of assignment statement GS.  */

static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}

/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which expect
   to see only a maximum of two operands.  */

static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}

/* A wrapper around extract_ops_from_tree_1, for callers which expect
   to see only a maximum of two operands.  Asserts that EXPR has no
   third operand.  */

static inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
		       tree *op1)
{
  tree op2;
  extract_ops_from_tree_1 (expr, code, op0, op1, &op2);
  gcc_assert (op2 == NULL_TREE);
}

/* Returns true if GS is a nontemporal move.  */

static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->gsbase.nontemporal_move;
}

/* Sets nontemporal move flag of GS to NONTEMPORAL.  */

static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->gsbase.nontemporal_move = nontemporal;
}


/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.  */

static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  code = (enum tree_code) gs->gsbase.subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.  */
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));

  return code;
}


/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */

static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->gsbase.subcode = code;
}


/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */

static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}

/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */

static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
          && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}

/* Return true if S is a type-cast assignment.  */

static inline bool
gimple_assign_cast_p (gimple s)
{
  if (is_gimple_assign (s))
    {
      enum tree_code sc = gimple_assign_rhs_code (s);
      return CONVERT_EXPR_CODE_P (sc)
	     || sc == VIEW_CONVERT_EXPR
	     || sc == FIX_TRUNC_EXPR;
    }

  return false;
}

/* Return true if S is a clobber statement.  */

static inline bool
gimple_clobber_p (gimple s)
{
  return gimple_assign_single_p (s)
         && TREE_CLOBBER_P (gimple_assign_rhs1 (s));
}

/* Return true if GS is a GIMPLE_CALL.  */

static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}

/* Return the LHS of call statement GS.  */

static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}


/* Return a pointer to the LHS of call statement GS.
*/

static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}


/* Set LHS to be the LHS operand of call statement GS.  */

static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  /* An SSA name's defining statement is this call now.  */
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}


/* Return true if call GS calls an internal-only function, as enumerated
   by internal_fn.  */

static inline bool
gimple_call_internal_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->gsbase.subcode & GF_CALL_INTERNAL) != 0;
}


/* Return the target of internal call GS.  */

static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  return gs->gimple_call.u.internal_fn;
}


/* Return the function type of the function called by GS.  Internal
   calls have no function type; NULL_TREE is returned for them.  */

static inline tree
gimple_call_fntype (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  if (gimple_call_internal_p (gs))
    return NULL_TREE;
  return gs->gimple_call.u.fntype;
}

/* Set the type of the function called by GS to FNTYPE.  */

static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gs->gimple_call.u.fntype = fntype;
}


/* Return the tree node representing the function called by call
   statement GS.  */

static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}

/* Return a pointer to the tree node representing the function called by call
   statement GS.  */

static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}


/* Set FN to be the function called by call statement GS.
*/

static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}


/* Set FNDECL to be the function called by call statement GS.  The decl
   is wrapped in an ADDR_EXPR carrying the call's location.  */

static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}


/* Set internal function FN to be the function called by call statement GS.  */

static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  gs->gimple_call.u.internal_fn = fn;
}


/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL
   associated with the callee if known.  Otherwise return NULL_TREE.
   Looks through the &*&fndecl form produced by folding as well.  */

static inline tree
gimple_call_addr_fndecl (const_tree fn)
{
  if (fn && TREE_CODE (fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (fn, 0);
      if (TREE_CODE (fndecl) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (fndecl, 0)) == ADDR_EXPR
	  && integer_zerop (TREE_OPERAND (fndecl, 1)))
	fndecl = TREE_OPERAND (TREE_OPERAND (fndecl, 0), 0);
      if (TREE_CODE (fndecl) == FUNCTION_DECL)
	return fndecl;
    }
  return NULL_TREE;
}

/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */

static inline tree
gimple_call_fndecl (const_gimple gs)
{
  return gimple_call_addr_fndecl (gimple_call_fn (gs));
}


/* Return the type returned by call statement GS.  Falls back to the
   type of the call's LHS when no function type is recorded (internal
   calls).  */

static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree type = gimple_call_fntype (gs);

  if (type == NULL_TREE)
    return TREE_TYPE (gimple_call_lhs (gs));

  /* The type returned by a function is the type of its
     function type.  */
  return TREE_TYPE (type);
}


/* Return the static chain for call statement GS.
*/ static inline tree gimple_call_chain (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, 2); } /* Return a pointer to the static chain for call statement GS. */ static inline tree * gimple_call_chain_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, 2); } /* Set CHAIN to be the static chain for call statement GS. */ static inline void gimple_call_set_chain (gimple gs, tree chain) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, 2, chain); } /* Return the number of arguments used by call statement GS. */ static inline unsigned gimple_call_num_args (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_CALL); num_ops = gimple_num_ops (gs); return num_ops - 3; } /* Return the argument at position INDEX for call statement GS. */ static inline tree gimple_call_arg (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op (gs, index + 3); } /* Return a pointer to the argument at position INDEX for call statement GS. */ static inline tree * gimple_call_arg_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_CALL); return gimple_op_ptr (gs, index + 3); } /* Set ARG to be the argument at position INDEX for call statement GS. */ static inline void gimple_call_set_arg (gimple gs, unsigned index, tree arg) { GIMPLE_CHECK (gs, GIMPLE_CALL); gimple_set_op (gs, index + 3, arg); } /* If TAIL_P is true, mark call statement S as being a tail call (i.e., a call just before the exit of a function). These calls are candidate for tail call optimization. */ static inline void gimple_call_set_tail (gimple s, bool tail_p) { GIMPLE_CHECK (s, GIMPLE_CALL); if (tail_p) s->gsbase.subcode |= GF_CALL_TAILCALL; else s->gsbase.subcode &= ~GF_CALL_TAILCALL; } /* Return true if GIMPLE_CALL S is marked as a tail call. 
*/

static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}


/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.  */

static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}


/* Return true if S is marked for return slot optimization.  */

static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}


/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */

static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->gsbase.subcode |= GF_CALL_FROM_THUNK;
  else
    s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}


/* Return true if GIMPLE_CALL S is a jump from a thunk.  */

static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}


/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */

static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}


/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */

static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}


/* Return true if S is a noreturn call.
*/

static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  /* Derived from the call flags, not from a subcode bit, so it also
     reflects attributes on the callee decl.  */
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}

/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.  */

static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->gsbase.subcode |= GF_CALL_NOTHROW;
  else
    s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}

/* Return true if S is a nothrow call.  */

static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}

/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
   is known to be emitted for VLA objects.  Those are wrapped by
   stack_save/stack_restore calls and hence can't lead to unbounded
   stack growth even when they occur in loops.  */

static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (for_var)
    s->gsbase.subcode |= GF_CALL_ALLOCA_FOR_VAR;
  else
    s->gsbase.subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}

/* Return true of S is a call to builtin_alloca emitted for VLA objects.  */

static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}

/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  */

static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  /* Copies the whole subcode word, i.e. every GF_CALL_* bit at once.  */
  dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}

/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */

static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_used;
}

/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.
*/

static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  GIMPLE_CHECK (call, GIMPLE_CALL);
  return &call->gimple_call.call_clobbered;
}

/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.  */

static inline bool
gimple_has_lhs (gimple stmt)
{
  return (is_gimple_assign (stmt)
	  || (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt) != NULL_TREE));
}

/* Return the code of the predicate computed by conditional statement GS.  */

static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  /* The comparison code is stored directly in the subcode field.  */
  return (enum tree_code) gs->gsbase.subcode;
}

/* Set CODE to be the predicate code for the conditional statement GS.  */

static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->gsbase.subcode = code;
}

/* Return the LHS of the predicate computed by conditional statement GS.  */

static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}

/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */

static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */

static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}

/* Return the RHS operand of the predicate computed by conditional GS.  */

static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}

/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */

static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.
*/

static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}

/* Return the label used by conditional statement GS when its
   predicate evaluates to true.  */

static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */

static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */

static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}

/* Return the label used by conditional statement GS when its
   predicate evaluates to false.  */

static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}

/* Set the conditional COND_STMT to be of the form 'if (1 == 0)'.  */

static inline void
gimple_cond_make_false (gimple gs)
{
  /* true == false, i.e. a constant-false condition.  */
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->gsbase.subcode = EQ_EXPR;
}

/* Set the conditional COND_STMT to be of the form 'if (1 == 1)'.
*/

static inline void
gimple_cond_make_true (gimple gs)
{
  /* true == true, i.e. a constant-true condition.  */
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->gsbase.subcode = EQ_EXPR;
}

/* Check if conditional statement GS is of the form 'if (1 == 1)',
   'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */

static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Only constant boolean operands can be decided here.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs != rhs)
    return true;

  if (code == EQ_EXPR && lhs == rhs)
    return true;

  return false;
}

/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */

static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs == rhs)
    return true;

  if (code == EQ_EXPR && lhs != rhs)
    return true;

  return false;
}

/* Check if conditional statement GS is of the form 'if (var != 0)' or
   'if (var == 1)' */

static inline bool
gimple_cond_single_var_p (gimple gs)
{
  if (gimple_cond_code (gs) == NE_EXPR
      && gimple_cond_rhs (gs) == boolean_false_node)
    return true;

  if (gimple_cond_code (gs) == EQ_EXPR
      && gimple_cond_rhs (gs) == boolean_true_node)
    return true;

  return false;
}

/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */

static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs,
			   tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}

/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.
*/

static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}

/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */

static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}

/* Return the destination of the unconditional jump GS.  */

static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}

/* Set DEST to be the destination of the unconditional jump GS.  */

static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}

/* Return the variables declared in the GIMPLE_BIND statement GS.  */

static inline tree
gimple_bind_vars (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.vars;
}

/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = vars;
}

/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}

/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.  */

static inline gimple_seq
gimple_bind_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  return gs->gimple_bind.body;
}

/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_BIND);
  gs->gimple_bind.body = seq;
}

/* Append a statement to the end of a GIMPLE_BIND's body.
*/ static inline void gimple_bind_add_stmt (gimple gs, gimple stmt) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_stmt (&gs->gimple_bind.body, stmt); } /* Append a sequence of statements to the end of a GIMPLE_BIND's body. */ static inline void gimple_bind_add_seq (gimple gs, gimple_seq seq) { GIMPLE_CHECK (gs, GIMPLE_BIND); gimple_seq_add_seq (&gs->gimple_bind.body, seq); } /* Return the TREE_BLOCK node associated with GIMPLE_BIND statement GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */ static inline tree gimple_bind_block (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_BIND); return gs->gimple_bind.block; } /* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND statement GS. */ static inline void gimple_bind_set_block (gimple gs, tree block) { GIMPLE_CHECK (gs, GIMPLE_BIND); gcc_gimple_checking_assert (block == NULL_TREE || TREE_CODE (block) == BLOCK); gs->gimple_bind.block = block; } /* Return the number of input operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_ninputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.ni; } /* Return the number of output operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_noutputs (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.no; } /* Return the number of clobber operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nclobbers (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nc; } /* Return the number of label operands for GIMPLE_ASM GS. */ static inline unsigned gimple_asm_nlabels (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.nl; } /* Return input operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_input_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op (gs, index); } /* Return a pointer to input operand INDEX of GIMPLE_ASM GS. 
*/ static inline tree * gimple_asm_input_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni); return gimple_op_ptr (gs, index); } /* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.ni && TREE_CODE (in_op) == TREE_LIST); gimple_set_op (gs, index, in_op); } /* Return output operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_output_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op (gs, index + gs->gimple_asm.ni); } /* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */ static inline tree * gimple_asm_output_op_ptr (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no); return gimple_op_ptr (gs, index + gs->gimple_asm.ni); } /* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.no && TREE_CODE (out_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni, out_op); } /* Return clobber operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_clobber_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no); } /* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. 
*/ static inline void gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nc && TREE_CODE (clobber_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op); } /* Return label operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_label_op (const_gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl); return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc); } /* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op) { GIMPLE_CHECK (gs, GIMPLE_ASM); gcc_gimple_checking_assert (index <= gs->gimple_asm.nl && TREE_CODE (label_op) == TREE_LIST); gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op); } /* Return the string representing the assembly instruction in GIMPLE_ASM GS. */ static inline const char * gimple_asm_string (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return gs->gimple_asm.string; } /* Return true if GS is an asm statement marked volatile. */ static inline bool gimple_asm_volatile_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0; } /* If VOLATLE_P is true, mark asm statement GS as volatile. */ static inline void gimple_asm_set_volatile (gimple gs, bool volatile_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (volatile_p) gs->gsbase.subcode |= GF_ASM_VOLATILE; else gs->gsbase.subcode &= ~GF_ASM_VOLATILE; } /* If INPUT_P is true, mark asm GS as an ASM_INPUT. */ static inline void gimple_asm_set_input (gimple gs, bool input_p) { GIMPLE_CHECK (gs, GIMPLE_ASM); if (input_p) gs->gsbase.subcode |= GF_ASM_INPUT; else gs->gsbase.subcode &= ~GF_ASM_INPUT; } /* Return true if asm GS is an ASM_INPUT. 
*/

static inline bool
gimple_asm_input_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}

/* Return the types handled by GIMPLE_CATCH statement GS.  */

static inline tree
gimple_catch_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.types;
}

/* Return a pointer to the types handled by GIMPLE_CATCH statement GS.  */

static inline tree *
gimple_catch_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.types;
}

/* Return the GIMPLE sequence representing the body of the handler of
   GIMPLE_CATCH statement GS.  */

static inline gimple_seq
gimple_catch_handler (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return gs->gimple_catch.handler;
}

/* Return a pointer to the GIMPLE sequence representing the body of
   the handler of GIMPLE_CATCH statement GS.  */

static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  return &gs->gimple_catch.handler;
}

/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_types (gimple gs, tree t)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.types = t;
}

/* Set HANDLER to be the body of GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
  GIMPLE_CHECK (gs, GIMPLE_CATCH);
  gs->gimple_catch.handler = handler;
}

/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */

static inline tree
gimple_eh_filter_types (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.types;
}

/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
   GS.  */

static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return &gs->gimple_eh_filter.types;
}

/* Return the sequence of statements to execute when the GIMPLE_EH_FILTER
   statement fails.
*/

static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  return gs->gimple_eh_filter.failure;
}

/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.types = types;
}

/* Set FAILURE to be the sequence of statements to execute on failure
   for GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
  gs->gimple_eh_filter.failure = failure;
}

/* Get the function decl to be called by the MUST_NOT_THROW region.  */

static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  return gs->gimple_eh_mnt.fndecl;
}

/* Set the function decl to be called by GS to DECL.  */

static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
  gs->gimple_eh_mnt.fndecl = decl;
}

/* GIMPLE_EH_ELSE accessors.  */

/* Return the normal-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return gs->gimple_eh_else.n_body;
}

/* Return the exception-path body of GIMPLE_EH_ELSE GS.  */

static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  return gs->gimple_eh_else.e_body;
}

/* Set the normal-path body of GIMPLE_EH_ELSE GS to SEQ.  */

static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.n_body = seq;
}

/* Set the exception-path body of GIMPLE_EH_ELSE GS to SEQ.  */

static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
  GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
  gs->gimple_eh_else.e_body = seq;
}

/* GIMPLE_TRY accessors. */

/* Return the kind of try block represented by GIMPLE_TRY GS.  This is
   either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.
*/

static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}

/* Set the kind of try block represented by GIMPLE_TRY GS.  */

static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
			      || kind == GIMPLE_TRY_FINALLY);
  /* Note: assigning the subcode also clears GIMPLE_TRY_CATCH_IS_CLEANUP,
     hence the guard against redundant stores.  */
  if (gimple_try_kind (gs) != kind)
    gs->gsbase.subcode = (unsigned int) kind;
}

/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
  return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}

/* Return the sequence of statements used as the body for GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_eval (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return gs->gimple_try.eval;
}

/* Return the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return gs->gimple_try.cleanup;
}

/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
  gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
  if (catch_is_cleanup)
    g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
  else
    g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}

/* Set EVAL to be the sequence of statements to use as the body for
   GIMPLE_TRY GS.  */

static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.eval = eval;
}

/* Set CLEANUP to be the sequence of statements to use as the cleanup
   body for GIMPLE_TRY GS.
*/

static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gs->gimple_try.cleanup = cleanup;
}

/* Return the cleanup sequence for cleanup statement GS.  */

static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  return gs->gimple_wce.cleanup;
}

/* Set CLEANUP to be the cleanup sequence for GS.  */

static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->gimple_wce.cleanup = cleanup;
}

/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.  */

static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  /* The whole subcode word doubles as the boolean flag here.  */
  return gs->gsbase.subcode != 0;
}

/* Set the CLEANUP_EH_ONLY flag for a WCE tuple.  */

static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->gsbase.subcode = (unsigned int) eh_only_p;
}

/* Return the maximum number of arguments supported by GIMPLE_PHI GS.  */

static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.capacity;
}

/* Return the number of arguments in GIMPLE_PHI GS.  This must always
   be exactly the number of incoming edges for the basic block holding
   GS.  */

static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.nargs;
}

/* Return the SSA name created by GIMPLE_PHI GS.  */

static inline tree
gimple_phi_result (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return gs->gimple_phi.result;
}

/* Return a pointer to the SSA name created by GIMPLE_PHI GS.  */

static inline tree *
gimple_phi_result_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PHI);
  return &gs->gimple_phi.result;
}

/* Set RESULT to be the SSA name created by GIMPLE_PHI GS.
*/ static inline void gimple_phi_set_result (gimple gs, tree result) { GIMPLE_CHECK (gs, GIMPLE_PHI); gs->gimple_phi.result = result; } /* Return the PHI argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline struct phi_arg_d * gimple_phi_arg (gimple gs, unsigned index) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.capacity); return &(gs->gimple_phi.args[index]); } /* Set PHIARG to be the argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline void gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) { GIMPLE_CHECK (gs, GIMPLE_PHI); gcc_gimple_checking_assert (index <= gs->gimple_phi.nargs); gs->gimple_phi.args[index] = *phiarg; } /* Return the region number for GIMPLE_RESX GS. */ static inline int gimple_resx_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RESX); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_RESX GS. */ static inline void gimple_resx_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_RESX); gs->gimple_eh_ctrl.region = region; } /* Return the region number for GIMPLE_EH_DISPATCH GS. */ static inline int gimple_eh_dispatch_region (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); return gs->gimple_eh_ctrl.region; } /* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */ static inline void gimple_eh_dispatch_set_region (gimple gs, int region) { GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH); gs->gimple_eh_ctrl.region = region; } /* Return the number of labels associated with the switch statement GS. */ static inline unsigned gimple_switch_num_labels (const_gimple gs) { unsigned num_ops; GIMPLE_CHECK (gs, GIMPLE_SWITCH); num_ops = gimple_num_ops (gs); gcc_gimple_checking_assert (num_ops > 1); return num_ops - 1; } /* Set NLABELS to be the number of labels for the switch statement GS. 
*/

static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
  GIMPLE_CHECK (g, GIMPLE_SWITCH);
  /* +1 accounts for the index expression in operand 0.  */
  gimple_set_num_ops (g, nlabels + 1);
}

/* Return the index variable used by the switch statement GS.  */

static inline tree
gimple_switch_index (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op (gs, 0);
}

/* Return a pointer to the index variable for the switch statement GS.  */

static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op_ptr (gs, 0);
}

/* Set INDEX to be the index variable for switch statement GS.  */

static inline void
gimple_switch_set_index (gimple gs, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
  gimple_set_op (gs, 0, index);
}

/* Return the label numbered INDEX.  The default label is 0, followed
   by any labels in a switch statement.  */

static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
  return gimple_op (gs, index + 1);
}

/* Set the label number INDEX to LABEL.  0 is always the default label.  */

static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
			      && (label == NULL_TREE
			          || TREE_CODE (label) == CASE_LABEL_EXPR));
  gimple_set_op (gs, index + 1, label);
}

/* Return the default label for a switch statement.  */

static inline tree
gimple_switch_default_label (const_gimple gs)
{
  return gimple_switch_label (gs, 0);
}

/* Set the default label for a switch statement.  */

static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
  gimple_switch_set_label (gs, 0, label);
}

/* Return true if GS is a GIMPLE_DEBUG statement.
*/

static inline bool
is_gimple_debug (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_DEBUG;
}

/* Return true if S is a GIMPLE_DEBUG BIND statement.  */

static inline bool
gimple_debug_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->gsbase.subcode == GIMPLE_DEBUG_BIND;

  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG bind statement.  */

static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
   optimized away.  */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */

/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
   statement.
*/

static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}

/* Return true if the GIMPLE_DEBUG bind statement is bound to a
   value.  */

static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}

#undef GIMPLE_DEBUG_BIND_NOVALUE

/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement.  */

static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->gsbase.subcode == GIMPLE_DEBUG_SOURCE_BIND;

  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG source bind statement.  */

static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */

static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG source bind statement.  */

static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG source bind statement.  */

static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.
*/

static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* Return the body for the OMP statement GS.  */

static inline gimple_seq
gimple_omp_body (gimple gs)
{
  /* No GIMPLE_CHECK here: all OMP tuples share the "omp" layout.  */
  return gs->omp.body;
}

/* Set BODY to be the body for the OMP statement GS.  */

static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
  gs->omp.body = body;
}

/* Return the name associated with OMP_CRITICAL statement GS.  */

static inline tree
gimple_omp_critical_name (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return gs->gimple_omp_critical.name;
}

/* Return a pointer to the name associated with OMP critical statement
   GS.  */

static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  return &gs->gimple_omp_critical.name;
}

/* Set NAME to be the name associated with OMP critical statement GS.  */

static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
  gs->gimple_omp_critical.name = name;
}

/* Return the clauses associated with OMP_FOR GS.  */

static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.clauses;
}

/* Return a pointer to the clauses associated with OMP_FOR GS.  */

static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return &gs->gimple_omp_for.clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */

static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gs->gimple_omp_for.clauses = clauses;
}

/* Get the collapse count of OMP_FOR GS.  */

static inline size_t
gimple_omp_for_collapse (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  return gs->gimple_omp_for.collapse;
}

/* Return the index variable for OMP_FOR GS.
*/

static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  /* I selects one of the "collapse" nested loop dimensions.  */
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].index;
}

/* Return a pointer to the index variable for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].index;
}

/* Set INDEX to be the index variable for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].index = index;
}

/* Return the initial value for OMP_FOR GS.  */

static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].initial;
}

/* Return a pointer to the initial value for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return &gs->gimple_omp_for.iter[i].initial;
}

/* Set INITIAL to be the initial value for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  gs->gimple_omp_for.iter[i].initial = initial;
}

/* Return the final value for OMP_FOR GS.  */

static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
  GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
  gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse);
  return gs->gimple_omp_for.iter[i].final;
}

/* Return a pointer to the final value for OMP_FOR GS.
*/ static inline tree * gimple_omp_for_final_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].final; } /* Set FINAL to be the final value for OMP_FOR GS. */ static inline void gimple_omp_for_set_final (gimple gs, size_t i, tree final) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].final = final; } /* Return the increment value for OMP_FOR GS. */ static inline tree gimple_omp_for_incr (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].incr; } /* Return a pointer to the increment value for OMP_FOR GS. */ static inline tree * gimple_omp_for_incr_ptr (gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return &gs->gimple_omp_for.iter[i].incr; } /* Set INCR to be the increment value for OMP_FOR GS. */ static inline void gimple_omp_for_set_incr (gimple gs, size_t i, tree incr) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].incr = incr; } /* Return the sequence of statements to execute before the OMP_FOR statement GS starts. */ static inline gimple_seq gimple_omp_for_pre_body (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); return gs->gimple_omp_for.pre_body; } /* Set PRE_BODY to be the sequence of statements to execute before the OMP_FOR statement GS starts. */ static inline void gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gs->gimple_omp_for.pre_body = pre_body; } /* Return the clauses associated with OMP_PARALLEL GS. 
*/ static inline tree gimple_omp_parallel_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */ static inline void gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_PARALLEL GS. */ static inline tree gimple_omp_parallel_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_PARALLEL GS. */ static inline tree * gimple_omp_parallel_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. 
*/ static inline void gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. */ static inline tree gimple_omp_task_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_task_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_task_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_task_child_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_task_child_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. */ static inline void gimple_omp_task_set_child_fn (gimple gs, tree child_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. */ static inline tree gimple_omp_task_data_arg (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. */ static inline tree * gimple_omp_task_data_arg_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. 
*/ static inline void gimple_omp_task_set_data_arg (gimple gs, tree data_arg) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the clauses associated with OMP_TASK GS. */ static inline tree gimple_omp_taskreg_clauses (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.clauses; } /* Return a pointer to the clauses associated with OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_clauses_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.clauses; } /* Set CLAUSES to be the list of clauses associated with OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_clauses (gimple gs, tree clauses) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.clauses = clauses; } /* Return the child function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_taskreg_child_fn (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.child_fn; } /* Return a pointer to the child function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_child_fn_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.child_fn; } /* Set CHILD_FN to be the child function for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.child_fn = child_fn; } /* Return the artificial argument used to send variables and values from the parent to the children threads in OMP_TASK GS. 
*/ static inline tree gimple_omp_taskreg_data_arg (const_gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_parallel.data_arg; } /* Return a pointer to the data argument for OMP_TASK GS. */ static inline tree * gimple_omp_taskreg_data_arg_ptr (gimple gs) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_parallel.data_arg; } /* Set DATA_ARG to be the data argument for OMP_TASK GS. */ static inline void gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg) { if (gimple_code (gs) != GIMPLE_OMP_PARALLEL) GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_parallel.data_arg = data_arg; } /* Return the copy function used to hold the body of OMP_TASK GS. */ static inline tree gimple_omp_task_copy_fn (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.copy_fn; } /* Return a pointer to the copy function used to hold the body of OMP_TASK GS. */ static inline tree * gimple_omp_task_copy_fn_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.copy_fn; } /* Set CHILD_FN to be the copy function for OMP_TASK GS. */ static inline void gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.copy_fn = copy_fn; } /* Return size of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_size (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_size; } /* Return a pointer to the data block size for OMP_TASK GS. */ static inline tree * gimple_omp_task_arg_size_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.arg_size; } /* Set ARG_SIZE to be the data block size for OMP_TASK GS. 
*/ static inline void gimple_omp_task_set_arg_size (gimple gs, tree arg_size) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.arg_size = arg_size; } /* Return align of the data block in bytes in OMP_TASK GS. */ static inline tree gimple_omp_task_arg_align (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return gs->gimple_omp_task.arg_align; } /* Return a pointer to the data block align for OMP_TASK GS. */ static inline tree * gimple_omp_task_arg_align_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); return &gs->gimple_omp_task.arg_align; } /* Set ARG_SIZE to be the data block align for OMP_TASK GS. */ static inline void gimple_omp_task_set_arg_align (gimple gs, tree arg_align) { GIMPLE_CHECK (gs, GIMPLE_OMP_TASK); gs->gimple_omp_task.arg_align = arg_align; } /* Return the clauses associated with OMP_SINGLE GS. */ static inline tree gimple_omp_single_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); return gs->gimple_omp_single.clauses; } /* Return a pointer to the clauses associated with OMP_SINGLE GS. */ static inline tree * gimple_omp_single_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); return &gs->gimple_omp_single.clauses; } /* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */ static inline void gimple_omp_single_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE); gs->gimple_omp_single.clauses = clauses; } /* Return the clauses associated with OMP_SECTIONS GS. */ static inline tree gimple_omp_sections_clauses (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return gs->gimple_omp_sections.clauses; } /* Return a pointer to the clauses associated with OMP_SECTIONS GS. */ static inline tree * gimple_omp_sections_clauses_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return &gs->gimple_omp_sections.clauses; } /* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS GS. 
*/ static inline void gimple_omp_sections_set_clauses (gimple gs, tree clauses) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); gs->gimple_omp_sections.clauses = clauses; } /* Return the control variable associated with the GIMPLE_OMP_SECTIONS in GS. */ static inline tree gimple_omp_sections_control (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return gs->gimple_omp_sections.control; } /* Return a pointer to the clauses associated with the GIMPLE_OMP_SECTIONS GS. */ static inline tree * gimple_omp_sections_control_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); return &gs->gimple_omp_sections.control; } /* Set CONTROL to be the set of clauses associated with the GIMPLE_OMP_SECTIONS in GS. */ static inline void gimple_omp_sections_set_control (gimple gs, tree control) { GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS); gs->gimple_omp_sections.control = control; } /* Set COND to be the condition code for OMP_FOR GS. */ static inline void gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison && i < gs->gimple_omp_for.collapse); gs->gimple_omp_for.iter[i].cond = cond; } /* Return the condition code associated with OMP_FOR GS. */ static inline enum tree_code gimple_omp_for_cond (const_gimple gs, size_t i) { GIMPLE_CHECK (gs, GIMPLE_OMP_FOR); gcc_gimple_checking_assert (i < gs->gimple_omp_for.collapse); return gs->gimple_omp_for.iter[i].cond; } /* Set the value being stored in an atomic store. */ static inline void gimple_omp_atomic_store_set_val (gimple g, tree val) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); g->gimple_omp_atomic_store.val = val; } /* Return the value being stored in an atomic store. */ static inline tree gimple_omp_atomic_store_val (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); return g->gimple_omp_atomic_store.val; } /* Return a pointer to the value being stored in an atomic store. 
*/ static inline tree * gimple_omp_atomic_store_val_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE); return &g->gimple_omp_atomic_store.val; } /* Set the LHS of an atomic load. */ static inline void gimple_omp_atomic_load_set_lhs (gimple g, tree lhs) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); g->gimple_omp_atomic_load.lhs = lhs; } /* Get the LHS of an atomic load. */ static inline tree gimple_omp_atomic_load_lhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.lhs; } /* Return a pointer to the LHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_lhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.lhs; } /* Set the RHS of an atomic load. */ static inline void gimple_omp_atomic_load_set_rhs (gimple g, tree rhs) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); g->gimple_omp_atomic_load.rhs = rhs; } /* Get the RHS of an atomic load. */ static inline tree gimple_omp_atomic_load_rhs (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return g->gimple_omp_atomic_load.rhs; } /* Return a pointer to the RHS of an atomic load. */ static inline tree * gimple_omp_atomic_load_rhs_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD); return &g->gimple_omp_atomic_load.rhs; } /* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline tree gimple_omp_continue_control_def (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_def; } /* The same as above, but return the address. */ static inline tree * gimple_omp_continue_control_def_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return &g->gimple_omp_continue.control_def; } /* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. 
*/ static inline void gimple_omp_continue_set_control_def (gimple g, tree def) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); g->gimple_omp_continue.control_def = def; } /* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline tree gimple_omp_continue_control_use (const_gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return g->gimple_omp_continue.control_use; } /* The same as above, but return the address. */ static inline tree * gimple_omp_continue_control_use_ptr (gimple g) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); return &g->gimple_omp_continue.control_use; } /* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */ static inline void gimple_omp_continue_set_control_use (gimple g, tree use) { GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE); g->gimple_omp_continue.control_use = use; } /* Return the body for the GIMPLE_TRANSACTION statement GS. */ static inline gimple_seq gimple_transaction_body (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return gs->gimple_transaction.body; } /* Return the label associated with a GIMPLE_TRANSACTION. */ static inline tree gimple_transaction_label (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return gs->gimple_transaction.label; } static inline tree * gimple_transaction_label_ptr (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return &gs->gimple_transaction.label; } /* Return the subcode associated with a GIMPLE_TRANSACTION. */ static inline unsigned int gimple_transaction_subcode (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); return gs->gsbase.subcode; } /* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS. */ static inline void gimple_transaction_set_body (gimple gs, gimple_seq body) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); gs->gimple_transaction.body = body; } /* Set the label associated with a GIMPLE_TRANSACTION. 
*/ static inline void gimple_transaction_set_label (gimple gs, tree label) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); gs->gimple_transaction.label = label; } /* Set the subcode associated with a GIMPLE_TRANSACTION. */ static inline void gimple_transaction_set_subcode (gimple gs, unsigned int subcode) { GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); gs->gsbase.subcode = subcode; } /* Return a pointer to the return value for GIMPLE_RETURN GS. */ static inline tree * gimple_return_retval_ptr (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RETURN); return gimple_op_ptr (gs, 0); } /* Return the return value for GIMPLE_RETURN GS. */ static inline tree gimple_return_retval (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_RETURN); return gimple_op (gs, 0); } /* Set RETVAL to be the return value for GIMPLE_RETURN GS. */ static inline void gimple_return_set_retval (gimple gs, tree retval) { GIMPLE_CHECK (gs, GIMPLE_RETURN); gimple_set_op (gs, 0, retval); } /* Returns true when the gimple statment STMT is any of the OpenMP types. */ #define CASE_GIMPLE_OMP \ case GIMPLE_OMP_PARALLEL: \ case GIMPLE_OMP_TASK: \ case GIMPLE_OMP_FOR: \ case GIMPLE_OMP_SECTIONS: \ case GIMPLE_OMP_SECTIONS_SWITCH: \ case GIMPLE_OMP_SINGLE: \ case GIMPLE_OMP_SECTION: \ case GIMPLE_OMP_MASTER: \ case GIMPLE_OMP_ORDERED: \ case GIMPLE_OMP_CRITICAL: \ case GIMPLE_OMP_RETURN: \ case GIMPLE_OMP_ATOMIC_LOAD: \ case GIMPLE_OMP_ATOMIC_STORE: \ case GIMPLE_OMP_CONTINUE static inline bool is_gimple_omp (const_gimple stmt) { switch (gimple_code (stmt)) { CASE_GIMPLE_OMP: return true; default: return false; } } /* Returns TRUE if statement G is a GIMPLE_NOP. */ static inline bool gimple_nop_p (const_gimple g) { return gimple_code (g) == GIMPLE_NOP; } /* Return true if GS is a GIMPLE_RESX. */ static inline bool is_gimple_resx (const_gimple gs) { return gimple_code (gs) == GIMPLE_RESX; } /* Return the predictor of GIMPLE_PREDICT statement GS. 
*/ static inline enum br_predictor gimple_predict_predictor (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN); } /* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */ static inline void gimple_predict_set_predictor (gimple gs, enum br_predictor predictor) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN) | (unsigned) predictor; } /* Return the outcome of GIMPLE_PREDICT statement GS. */ static inline enum prediction gimple_predict_outcome (gimple gs) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN; } /* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */ static inline void gimple_predict_set_outcome (gimple gs, enum prediction outcome) { GIMPLE_CHECK (gs, GIMPLE_PREDICT); if (outcome == TAKEN) gs->gsbase.subcode |= GF_PREDICT_TAKEN; else gs->gsbase.subcode &= ~GF_PREDICT_TAKEN; } /* Return the type of the main expression computed by STMT. Return void_type_node if the statement computes nothing. */ static inline tree gimple_expr_type (const_gimple stmt) { enum gimple_code code = gimple_code (stmt); if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL) { tree type; /* In general we want to pass out a type that can be substituted for both the RHS and the LHS types if there is a possibly useless conversion involved. That means returning the original RHS type as far as we can reconstruct it. */ if (code == GIMPLE_CALL) type = gimple_call_return_type (stmt); else switch (gimple_assign_rhs_code (stmt)) { case POINTER_PLUS_EXPR: type = TREE_TYPE (gimple_assign_rhs1 (stmt)); break; default: /* As fallback use the type of the LHS. */ type = TREE_TYPE (gimple_get_lhs (stmt)); break; } return type; } else if (code == GIMPLE_COND) return boolean_type_node; else return void_type_node; } /* Return true if TYPE is a suitable type for a scalar register variable. 
*/ static inline bool is_gimple_reg_type (tree type) { return !AGGREGATE_TYPE_P (type); } /* Return a new iterator pointing to GIMPLE_SEQ's first statement. */ static inline gimple_stmt_iterator gsi_start (gimple_seq seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_first (seq); i.seq = seq; i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL; return i; } /* Return a new iterator pointing to the first statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq seq; seq = bb_seq (bb); i.ptr = gimple_seq_first (seq); i.seq = seq; i.bb = bb; return i; } /* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */ static inline gimple_stmt_iterator gsi_last (gimple_seq seq) { gimple_stmt_iterator i; i.ptr = gimple_seq_last (seq); i.seq = seq; i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL; return i; } /* Return a new iterator pointing to the last statement in basic block BB. */ static inline gimple_stmt_iterator gsi_last_bb (basic_block bb) { gimple_stmt_iterator i; gimple_seq seq; seq = bb_seq (bb); i.ptr = gimple_seq_last (seq); i.seq = seq; i.bb = bb; return i; } /* Return true if I is at the end of its sequence. */ static inline bool gsi_end_p (gimple_stmt_iterator i) { return i.ptr == NULL; } /* Return true if I is one statement before the end of its sequence. */ static inline bool gsi_one_before_end_p (gimple_stmt_iterator i) { return i.ptr != NULL && i.ptr->next == NULL; } /* Advance the iterator to the next gimple statement. */ static inline void gsi_next (gimple_stmt_iterator *i) { i->ptr = i->ptr->next; } /* Advance the iterator to the previous gimple statement. */ static inline void gsi_prev (gimple_stmt_iterator *i) { i->ptr = i->ptr->prev; } /* Return the current stmt. 
*/ static inline gimple gsi_stmt (gimple_stmt_iterator i) { return i.ptr->stmt; } /* Return a block statement iterator that points to the first non-label statement in block BB. */ static inline gimple_stmt_iterator gsi_after_labels (basic_block bb) { gimple_stmt_iterator gsi = gsi_start_bb (bb); while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL) gsi_next (&gsi); return gsi; } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_next_nondebug (gimple_stmt_iterator *i) { do { gsi_next (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Advance the iterator to the next non-debug gimple statement. */ static inline void gsi_prev_nondebug (gimple_stmt_iterator *i) { do { gsi_prev (i); } while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i))); } /* Return a new iterator pointing to the first non-debug statement in basic block BB. */ static inline gimple_stmt_iterator gsi_start_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_start_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_next_nondebug (&i); return i; } /* Return a new iterator pointing to the last non-debug statement in basic block BB. */ static inline gimple_stmt_iterator gsi_last_nondebug_bb (basic_block bb) { gimple_stmt_iterator i = gsi_last_bb (bb); if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i))) gsi_prev_nondebug (&i); return i; } /* Return a pointer to the current stmt. NOTE: You may want to use gsi_replace on the iterator itself, as this performs additional bookkeeping that will not be done if you simply assign through a pointer returned by gsi_stmt_ptr. */ static inline gimple * gsi_stmt_ptr (gimple_stmt_iterator *i) { return &i->ptr->stmt; } /* Return the basic block associated with this iterator. */ static inline basic_block gsi_bb (gimple_stmt_iterator i) { return i.bb; } /* Return the sequence associated with this iterator. 
*/ static inline gimple_seq gsi_seq (gimple_stmt_iterator i) { return i.seq; } enum gsi_iterator_update { GSI_NEW_STMT, /* Only valid when single statement is added, move iterator to it. */ GSI_SAME_STMT, /* Leave the iterator at the same statement. */ GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable for linking other statements in the same direction. */ }; /* In gimple-iterator.c */ gimple_stmt_iterator gsi_start_phis (basic_block); gimple_seq gsi_split_seq_after (gimple_stmt_iterator); gimple_seq gsi_split_seq_before (gimple_stmt_iterator *); void gsi_replace (gimple_stmt_iterator *, gimple, bool); void gsi_insert_before (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_after (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple, enum gsi_iterator_update); void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq, enum gsi_iterator_update); void gsi_remove (gimple_stmt_iterator *, bool); gimple_stmt_iterator gsi_for_stmt (gimple); void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *); void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *); void gsi_insert_on_edge (edge, gimple); void gsi_insert_seq_on_edge (edge, gimple_seq); basic_block gsi_insert_on_edge_immediate (edge, gimple); basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq); void gsi_commit_one_edge_insert (edge, basic_block *); void gsi_commit_edge_inserts (void); gimple 
gimple_call_copy_skip_args (gimple, bitmap); /* Convenience routines to walk all statements of a gimple function. Note that this is useful exclusively before the code is converted into SSA form. Once the program is in SSA form, the standard operand interface should be used to analyze/modify statements. */ struct walk_stmt_info { /* Points to the current statement being walked. */ gimple_stmt_iterator gsi; /* Additional data that the callback functions may want to carry through the recursion. */ void *info; /* Pointer map used to mark visited tree nodes when calling walk_tree on each operand. If set to NULL, duplicate tree nodes will be visited more than once. */ struct pointer_set_t *pset; /* Operand returned by the callbacks. This is set when calling walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback returns non-NULL, this field will contain the tree returned by the last callback. */ tree callback_result; /* Indicates whether the operand being examined may be replaced with something that matches is_gimple_val (if true) or something slightly more complicated (if false). "Something" technically means the common subset of is_gimple_lvalue and is_gimple_rhs, but we never try to form anything more complicated than that, so we don't bother checking. Also note that CALLBACK should update this flag while walking the sub-expressions of a statement. For instance, when walking the statement 'foo (&var)', the flag VAL_ONLY will initially be set to true, however, when walking &var, the operand of that ADDR_EXPR does not need to be a GIMPLE value. */ BOOL_BITFIELD val_only : 1; /* True if we are currently walking the LHS of an assignment. */ BOOL_BITFIELD is_lhs : 1; /* Optional. Set to true by the callback functions if they made any changes. */ BOOL_BITFIELD changed : 1; /* True if we're interested in location information. */ BOOL_BITFIELD want_locations : 1; /* True if we've removed the statement that was processed. 
*/ BOOL_BITFIELD removed_stmt : 1; }; /* Callback for walk_gimple_stmt. Called for every statement found during traversal. The first argument points to the statement to walk. The second argument is a flag that the callback sets to 'true' if it the callback handled all the operands and sub-statements of the statement (the default value of this flag is 'false'). The third argument is an anonymous pointer to data to be used by the callback. */ typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *, struct walk_stmt_info *); gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn, struct walk_stmt_info *); tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *); #ifdef GATHER_STATISTICS /* Enum and arrays used for allocation stats. Keep in sync with gimple.c:gimple_alloc_kind_names. */ enum gimple_alloc_kind { gimple_alloc_kind_assign, /* Assignments. */ gimple_alloc_kind_phi, /* PHI nodes. */ gimple_alloc_kind_cond, /* Conditionals. */ gimple_alloc_kind_seq, /* Sequences. */ gimple_alloc_kind_rest, /* Everything else. */ gimple_alloc_kind_all }; extern int gimple_alloc_counts[]; extern int gimple_alloc_sizes[]; /* Return the allocation kind for a given stmt CODE. */ static inline enum gimple_alloc_kind gimple_alloc_kind (enum gimple_code code) { switch (code) { case GIMPLE_ASSIGN: return gimple_alloc_kind_assign; case GIMPLE_PHI: return gimple_alloc_kind_phi; case GIMPLE_COND: return gimple_alloc_kind_cond; default: return gimple_alloc_kind_rest; } } #endif /* GATHER_STATISTICS */ extern void dump_gimple_statistics (void); /* In gimple-fold.c. 
*/ void gimplify_and_update_call_from_tree (gimple_stmt_iterator *, tree); tree gimple_fold_builtin (gimple); bool fold_stmt (gimple_stmt_iterator *); bool fold_stmt_inplace (gimple_stmt_iterator *); tree get_symbol_constant_value (tree); tree canonicalize_constructor_val (tree); extern tree maybe_fold_and_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); extern tree maybe_fold_or_comparisons (enum tree_code, tree, tree, enum tree_code, tree, tree); bool gimple_val_nonnegative_real_p (tree); #endif /* GCC_GIMPLE_H */
concurrent_unordered_map.cuh.h
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CONCURRENT_UNORDERED_MAP_CUH
#define CONCURRENT_UNORDERED_MAP_CUH

#include <thrust/pair.h>

#include <cassert>
#include <iostream>
#include <iterator>
#include <type_traits>

#include "hash_functions.cuh"
#include "managed.cuh"
#include "managed_allocator.cuh"

// TODO: replace this with CUDA_TRY and propagate the error
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL(call)                                                    \
  {                                                                           \
    cudaError_t cudaStatus = call;                                            \
    if (cudaSuccess != cudaStatus) {                                          \
      fprintf(stderr,                                                         \
              "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with " \
              "%s (%d).\n",                                                   \
              #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus),      \
              cudaStatus);                                                    \
      exit(1);                                                                \
    }                                                                         \
  }
#endif

/**
 * Emulated compare-and-swap for 8-bit values.
 *
 * CUDA provides no native byte-wide atomicCAS, so this operates on the
 * aligned 32-bit word containing @p address and manipulates only the
 * target byte within it.
 *
 * Fixes over the previous implementation:
 *  - The old code built the 32-bit comparand/new-value words with the three
 *    neighboring bytes zeroed, so the CAS spuriously failed unless those
 *    bytes happened to be zero — and zeroed them when it succeeded.
 *  - The old code cast the returned old word directly to int8_t, which
 *    yields byte 0 of the word rather than the byte at @p address.
 *
 * @param address  Target byte (assumed to lie in addressable global/shared
 *                 memory, as required by the underlying 32-bit atomicCAS)
 * @param compare  Expected current value of the target byte
 * @param val      Value to store if the target byte equals @p compare
 * @return The value of the target byte observed before the operation,
 *         matching the contract of the native atomicCAS overloads.
 */
__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare,
                                       int8_t val) {
  // Word containing the target byte, aligned down to a 4-byte boundary.
  uint32_t* base_address =
      (uint32_t*)((char*)address - ((size_t)address & 3));
  unsigned int shift = ((size_t)address & 3) * 8;  // bit offset of the byte
  uint32_t byte_mask = 0xffu << shift;

  uint32_t old_word = *base_address;
  uint32_t assumed;
  do {
    assumed = old_word;
    // If the target byte no longer matches, the CAS fails: return what we saw.
    if ((int8_t)((old_word & byte_mask) >> shift) != compare) break;
    // Splice the new byte into the word, preserving the neighboring bytes.
    uint32_t new_word =
        (old_word & ~byte_mask) | (((uint32_t)(uint8_t)val << shift) & byte_mask);
    old_word = atomicCAS((unsigned int*)base_address, assumed, new_word);
    // Retry only if a *neighboring* byte changed underneath us; if the target
    // byte itself changed, the comparison above will terminate the loop.
  } while (assumed != old_word);

  return (int8_t)((old_word & byte_mask) >> shift);
}

// TODO: can we do this more efficiently?
// Emulates a 16-bit compare-and-swap with a masked CAS on the aligned 32-bit
// word containing `address` (the half-word is assumed 2-byte aligned, so
// `(size_t)address & 2` selects which half of the word it occupies).
//
// Fix over the previous version: the old code CASed the *entire* word against
// `compare` placed in one half-word (zeros in the other half), so it only
// succeeded when the neighbouring half-word was zero, and it returned the low
// half of the old word regardless of which half was targeted. This version
// masks both the comparison and the returned value to the addressed half-word.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare,
                                        int16_t val) {
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 2));
  const unsigned shift = ((size_t)address & 2) * 8;
  const int32_t half_mask = (int32_t)0xFFFF << shift;
  int32_t old_word = *base_address;
  int32_t assumed;
  do {
    assumed = old_word;
    // CAS must fail as soon as the target half-word differs from `compare`.
    if ((int16_t)((assumed & half_mask) >> shift) != compare) break;
    const int32_t new_word =
        (assumed & ~half_mask) |
        (((int32_t)(uint16_t)val << shift) & half_mask);
    old_word = atomicCAS(base_address, assumed, new_word);
  } while (assumed != old_word);
  return (int16_t)((old_word & half_mask) >> shift);
}

// The remaining overloads simply reinterpret 64-bit / floating-point values
// as the integer types for which CUDA provides native atomicCAS/atomicAdd.

__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare,
                                        int64_t val) {
  return (int64_t)atomicCAS((unsigned long long*)address,
                            (unsigned long long)compare,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare,
                                         uint64_t val) {
  return (uint64_t)atomicCAS((unsigned long long*)address,
                             (unsigned long long)compare,
                             (unsigned long long)val);
}

__inline__ __device__ long long int atomicCAS(long long int* address,
                                              long long int compare,
                                              long long int val) {
  return (long long int)atomicCAS((unsigned long long*)address,
                                  (unsigned long long)compare,
                                  (unsigned long long)val);
}

__inline__ __device__ double atomicCAS(double* address, double compare,
                                       double val) {
  return __longlong_as_double(atomicCAS((unsigned long long int*)address,
                                        __double_as_longlong(compare),
                                        __double_as_longlong(val)));
}

__inline__ __device__ float atomicCAS(float* address, float compare,
                                      float val) {
  return __int_as_float(
      atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val)));
}

__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val) {
  return (int64_t)atomicAdd((unsigned long long*)address,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val) {
  return (uint64_t)atomicAdd((unsigned long long*)address,
                             (unsigned long long)val);
}

// Loads a key/value pair through a single vector load when the pair's size
// matches a native vector type (16/8/4/2 bytes); falls back to a plain load
// otherwise. The union is used to type-pun between the vector type and the
// pair without violating alignment expectations of the vector load.
template <typename pair_type>
__forceinline__ __device__ pair_type
load_pair_vectorized(const pair_type* __restrict__ const ptr) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
    return converter.pair_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
    return converter.pair_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const int*>(ptr);
    return converter.pair_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const short*>(ptr);
    return converter.pair_val;
  } else {
    return *ptr;
  }
}

// Mirror of load_pair_vectorized: stores a key/value pair through a single
// vector store when the sizes line up, otherwise a plain assignment.
template <typename pair_type>
__forceinline__ __device__ void store_pair_vectorized(
    pair_type* __restrict__ const ptr, const pair_type val) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint4*>(ptr) = converter.vec_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint2*>(ptr) = converter.vec_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<int*>(ptr) = converter.vec_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<short*>(ptr) = converter.vec_val;
  } else {
    *ptr = val;
  }
}

template <typename value_type, typename size_type, typename key_type,
          typename elem_type>
__global__
// Initialization kernel: writes the <key_val, elem_val> sentinel pair into
// every slot of the table. One thread per slot; the launch must cover n.
void init_hashtbl(value_type* __restrict__ const hashtbl_values,
                  const size_type n, const key_type key_val,
                  const elem_type elem_val) {
  const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    // Simply store every element a <K, V> pair
    store_pair_vectorized(hashtbl_values + idx,
                          thrust::make_pair(key_val, elem_val));
  }
}

// Device-callable replacement for std::equal_to (which is not __device__).
template <typename T>
struct equal_to {
  using result_type = bool;
  using first_argument_type = T;
  using second_argument_type = T;
  __forceinline__ __host__ __device__ constexpr bool operator()(
      const first_argument_type& lhs, const second_argument_type& rhs) const {
    return lhs == rhs;
  }
};

// Iterator over [m_begin, m_end) that wraps around to m_begin when it is
// incremented past the last element — used for linear probing of the table.
template <typename Iterator>
class cycle_iterator_adapter {
 public:
  using value_type = typename std::iterator_traits<Iterator>::value_type;
  using difference_type =
      typename std::iterator_traits<Iterator>::difference_type;
  using pointer = typename std::iterator_traits<Iterator>::pointer;
  using reference = typename std::iterator_traits<Iterator>::reference;
  using iterator_type = Iterator;

  cycle_iterator_adapter() = delete;

  __host__ __device__ explicit cycle_iterator_adapter(
      const iterator_type& begin, const iterator_type& end,
      const iterator_type& current)
      : m_begin(begin), m_end(end), m_current(current) {}

  // Pre-increment with wrap-around.
  __host__ __device__ cycle_iterator_adapter& operator++() {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // NOTE(review): this const overload mutates m_current inside a const member
  // function, which is ill-formed if it is ever instantiated; kept verbatim
  // because, as a template member, it compiles as long as it is never called.
  __host__ __device__ const cycle_iterator_adapter& operator++() const {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // Post-increment with wrap-around.
  // Fix: the previous version returned a *reference* to the local `old`,
  // i.e. a dangling reference. Post-increment must return by value.
  __host__ __device__ cycle_iterator_adapter operator++(int) {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  // NOTE(review): same two problems as the const pre-increment above (mutation
  // in a const member) — ill-formed if instantiated; kept for interface
  // compatibility, now at least returning by value instead of a dangling
  // reference.
  __host__ __device__ const cycle_iterator_adapter operator++(int) const {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  __host__ __device__ bool equal(
      const cycle_iterator_adapter<iterator_type>& other) const {
    return m_current == other.m_current && m_begin == other.m_begin &&
           m_end == other.m_end;
  }

  __host__ __device__ reference& operator*() { return *m_current; }

  __host__ __device__ const reference& operator*() const { return *m_current; }

  __host__ __device__ const pointer operator->() const {
    return m_current.operator->();
  }

  // assumes iterator_type is a raw pointer here — TODO confirm for other
  // instantiations.
  __host__ __device__ pointer operator->() { return m_current; }

  // Exposes the raw position (used by accum() below to get a mutable slot).
  __host__ __device__ iterator_type getter() const { return m_current; }

 private:
  iterator_type m_current;
  iterator_type m_begin;
  iterator_type m_end;
};

template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return lhs.equal(rhs);
}

template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return !lhs.equal(rhs);
}

/**
 * Does support concurrent insert, but not concurrent insert and probing.
 *
 * TODO:
 *  - add constructor that takes pointer to hash_table to avoid allocations
 *  - extend interface to accept streams
 */
template <typename Key, typename Element, Key unused_key,
          typename Hasher = default_hash<Key>,
          typename Equality = equal_to<Key>,
          typename Allocator = managed_allocator<thrust::pair<Key, Element>>,
          bool count_collisions = false>
class concurrent_unordered_map : public managed {
 public:
  using size_type = size_t;
  using hasher = Hasher;
  using key_equal = Equality;
  using allocator_type = Allocator;
  using key_type = Key;
  using value_type = thrust::pair<Key, Element>;
  using mapped_type = Element;
  using iterator = cycle_iterator_adapter<value_type*>;
  using const_iterator = const cycle_iterator_adapter<value_type*>;

 private:
  // Used by the (commented-out) insert(value_type) path to CAS an entire
  // <key, value> pair as one 64-bit word when both fit.
  union pair2longlong {
    unsigned long long int longlong;
    value_type pair;
  };

 public:
  concurrent_unordered_map(const concurrent_unordered_map&) = delete;
  concurrent_unordered_map& operator=(const concurrent_unordered_map&) =
      delete;

  // Allocates a table of n slots and fills every slot with
  // <unused_key, unused_element> via the init_hashtbl kernel; synchronizes on
  // the default stream before returning, so the table is ready for use.
  // NOTE(review): the member-initializer order below does not match the
  // declaration order of the members (members initialize in declaration
  // order, so this only triggers -Wreorder; there are no cross-dependencies).
  explicit concurrent_unordered_map(size_type n,
                                    const mapped_type unused_element,
                                    const Hasher& hf = hasher(),
                                    const Equality& eql = key_equal(),
                                    const allocator_type& a = allocator_type())
      : m_hf(hf),
        m_equal(eql),
        m_allocator(a),
        m_hashtbl_size(n),
        m_hashtbl_capacity(n),
        m_collisions(0),
        m_unused_element(unused_element) {
    // allocate the raw data of hash table:
    // m_hashtbl_values, pre-alloc it on current GPU if UM.
    m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
    constexpr int block_size = 128;
    {
      // If the allocation is managed memory, prefetch it to the current
      // device so the init kernel does not fault pages in one by one.
      cudaPointerAttributes hashtbl_values_ptr_attributes;
      cudaError_t status = cudaPointerGetAttributes(
          &hashtbl_values_ptr_attributes, m_hashtbl_values);

#if CUDART_VERSION >= 10000
      if (cudaSuccess == status &&
          hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
      if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
      {
        int dev_id = 0;
        CUDA_RT_CALL(cudaGetDevice(&dev_id));
        CUDA_RT_CALL(cudaMemPrefetchAsync(
            m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, 0));
      }
    }

    // Initialize kernel, set all entry to unused <K,V>
    init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size>>>(
        m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element);
    // CUDA_RT_CALL( cudaGetLastError() );
    CUDA_RT_CALL(cudaStreamSynchronize(0));
    CUDA_RT_CALL(cudaGetLastError());
  }

  ~concurrent_unordered_map() {
    m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
  }

  // begin()/end() iterate the raw slot array, including unused slots.
  __host__ __device__ iterator begin() {
    return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                    m_hashtbl_values);
  }
  __host__ __device__ const_iterator begin() const {
    return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                          m_hashtbl_values);
  }
  __host__ __device__ iterator end() {
    return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                    m_hashtbl_values + m_hashtbl_size);
  }
  __host__ __device__ const_iterator end() const {
    return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                          m_hashtbl_values + m_hashtbl_size);
  }
  // Number of slots in the table (capacity, not element count).
  __host__ __device__ size_type size() const { return m_hashtbl_size; }
  __host__ __device__ value_type* data() const { return m_hashtbl_values; }

  __forceinline__ static constexpr __host__ __device__ key_type
  get_unused_key() {
    return unused_key;
  }

  // Generic update of a hash table value for any aggregator
  template <typename aggregation_type>
  __forceinline__ __device__ void update_existing_value(
      mapped_type& existing_value, value_type const& insert_pair,
      aggregation_type) {
    // update without CAS
    existing_value = insert_pair.second;
  }

  // Element-wise atomic accumulation of accum_pair.second into
  // existing_value. Assumes mapped_type exposes a fixed-size array member
  // `data` of a type for which atomicAdd is overloaded — TODO confirm for
  // each instantiation.
  __forceinline__ __device__ void accum_existing_value_atomic(
      mapped_type& existing_value, value_type const& accum_pair) {
    // update with CAS
    // existing_value = insert_pair.second;
    int num_element =
        sizeof(existing_value.data) / sizeof(*(existing_value.data));
    const mapped_type& accumulator = accum_pair.second;
    for (int i = 0; i < num_element; i++) {
      atomicAdd(existing_value.data + i, accumulator.data[i]);
    }
    // atomicAdd(&existing_value, double val)
  }

  // TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload
  // specifically for the types where atomicAdd already has an overload.
  // Otherwise the generic update_existing_value will be used.
  // Specialization for COUNT aggregator
  /*
  __forceinline__ __host__ __device__ void
  update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair, count_op<int32_t> op) { atomicAdd(&existing_value,
  static_cast<mapped_type>(1)); }

  // Specialization for COUNT aggregator
  __forceinline__ __host__ __device__ void
  update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair, count_op<int64_t> op) { atomicAdd(&existing_value,
  static_cast<mapped_type>(1)); }

  // Specialization for COUNT aggregator
  __forceinline__ __host__ __device__ void
  update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair, count_op<float> op) { atomicAdd(&existing_value,
  static_cast<mapped_type>(1)); }

  // Specialization for COUNT aggregator
  __forceinline__ __host__ __device__ void
  update_existing_value(mapped_type & existing_value, value_type const &
  insert_pair, count_op<double> op) { atomicAdd(&existing_value,
  static_cast<mapped_type>(1)); }
  */

  /* --------------------------------------------------------------------------*/
  /**
   * @Synopsis  Inserts a new (key, value) pair. If the key already exists in
   the map an aggregation operation is performed with the new value and
   existing value. E.g., if the aggregation operation is 'max', then the
   maximum is computed between the new value and existing value and the result
   is stored in the map.
   *
   * @Param[in] x The new (key, value) pair to insert
   * @Param[in] op The aggregation operation to perform
   * @Param[in] keys_equal An optional functor for comparing two keys
   * @Param[in] precomputed_hash Indicates if a precomputed hash value is
   being passed in to use to determine the write location of the new key
   * @Param[in] precomputed_hash_value The precomputed hash value
   * @tparam aggregation_type A functor for a binary operation that performs
   the aggregation
   * @tparam comparison_type A functor for comparing two keys
   *
   * @Returns An iterator to the newly inserted key,value pair
   */
  /* ----------------------------------------------------------------------------*/
  template <typename aggregation_type, class comparison_type = key_equal,
            typename hash_value_type = typename Hasher::result_type>
  __forceinline__ __device__ iterator
  insert(const value_type& x, aggregation_type op,
         comparison_type keys_equal = key_equal(),
         bool precomputed_hash = false,
         hash_value_type precomputed_hash_value = 0) {
    const size_type hashtbl_size = m_hashtbl_size;
    value_type* hashtbl_values = m_hashtbl_values;

    hash_value_type hash_value{0};

    // If a precomputed hash value has been passed in, then use it to
    // determine the write location of the new key
    if (true == precomputed_hash) {
      hash_value = precomputed_hash_value;
    }
    // Otherwise, compute the hash value from the new key
    else {
      hash_value = m_hf(x.first);
    }

    size_type current_index = hash_value % hashtbl_size;
    value_type* current_hash_bucket = &(hashtbl_values[current_index]);

    const key_type insert_key = x.first;

    bool insert_success = false;

    size_type counter = 0;
    while (false == insert_success) {
      // Table is full: every probe failed. Give up and return end().
      if (counter++ >= hashtbl_size) {
        return end();
      }

      key_type& existing_key = current_hash_bucket->first;
      mapped_type& existing_value = current_hash_bucket->second;

      // Try and set the existing_key for the current hash bucket to
      // insert_key
      const key_type old_key = atomicCAS(&existing_key, unused_key,
                                         insert_key);

      // If old_key == unused_key, the current hash bucket was empty
      // and existing_key was updated to insert_key by the atomicCAS.
      // If old_key == insert_key, this key has already been inserted.
      // In either case, perform the atomic aggregation of existing_value and
      // insert_value
      // Because the hash table is initialized with the identity value of the
      // aggregation operation, it is safe to perform the operation when the
      // existing_value still has its initial value
      // TODO: Use template specialization to make use of native atomic
      // functions
      // TODO: How to handle data types less than 32 bits?
      if (keys_equal(unused_key, old_key) || keys_equal(insert_key, old_key)) {
        update_existing_value(existing_value, x, op);

        insert_success = true;
      }

      // Linear probing: advance to the next slot (with wrap-around).
      current_index = (current_index + 1) % hashtbl_size;
      current_hash_bucket = &(hashtbl_values[current_index]);
    }

    return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
                    current_hash_bucket);
  }

  /* This function is not currently implemented
  __forceinline__ __host__ __device__ iterator insert(const value_type& x)
  {
      const size_type hashtbl_size    = m_hashtbl_size;
      value_type* hashtbl_values      = m_hashtbl_values;
      const size_type key_hash        = m_hf( x.first );
      size_type hash_tbl_idx          = key_hash%hashtbl_size;

      value_type* it = 0;

      while (0 == it) {
          value_type* tmp_it = hashtbl_values + hash_tbl_idx;
#ifdef __CUDA_ARCH__
          if ( std::numeric_limits<key_type>::is_integer &&
  std::numeric_limits<mapped_type>::is_integer && sizeof(unsigned long long
  int) == sizeof(value_type) )
          {
              pair2longlong converter = {0ull};
              converter.pair = thrust::make_pair( unused_key, m_unused_element
  );
              const unsigned long long int unused = converter.longlong;
              converter.pair = x;
              const unsigned long long int value = converter.longlong;
              const unsigned long long int old_val = atomicCAS(
  reinterpret_cast<unsigned long long int*>(tmp_it), unused, value );
              if ( old_val == unused ) {
                  it = tmp_it;
              }
              else if ( count_collisions )
              {
                  atomicAdd( &m_collisions, 1 );
              }
          } else {
              const key_type old_key = atomicCAS( &(tmp_it->first), unused_key,
  x.first );
              if ( m_equal( unused_key, old_key ) ) {
                  (m_hashtbl_values+hash_tbl_idx)->second = x.second;
                  it = tmp_it;
              }
              else if ( count_collisions )
              {
                  atomicAdd( &m_collisions, 1 );
              }
          }
#else

          #pragma omp critical
          {
              if ( m_equal( unused_key, tmp_it->first ) ) {
                  hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first,
  x.second );
                  it = tmp_it;
              }
          }
#endif
          hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;
      }

      return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);
  }
  */

  // Linear-probe lookup. Returns end() if the key is absent (an unused slot
  // is reached) or if the whole table has been scanned.
  __forceinline__ __host__ __device__ const_iterator
  find(const key_type& k) const {
    size_type key_hash = m_hf(k);
    size_type hash_tbl_idx = key_hash % m_hashtbl_size;

    value_type* begin_ptr = 0;

    size_type counter = 0;
    while (0 == begin_ptr) {
      value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx;
      const key_type tmp_val = tmp_ptr->first;
      if (m_equal(k, tmp_val)) {
        begin_ptr = tmp_ptr;
        break;
      }
      if (m_equal(unused_key, tmp_val) || counter > m_hashtbl_size) {
        begin_ptr = m_hashtbl_values + m_hashtbl_size;
        break;
      }
      hash_tbl_idx = (hash_tbl_idx + 1) % m_hashtbl_size;
      ++counter;
    }

    return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                          begin_ptr);
  }

  // Combined get-or-insert: if `k` is new, a fresh value is assigned by
  // atomically incrementing *value_counter; if `k` is being inserted by
  // another thread concurrently, spins (on a volatile read) until that
  // thread's value becomes visible. Returns end() when the table is full.
  template <typename aggregation_type, typename counter_type,
            class comparison_type = key_equal,
            typename hash_value_type = typename Hasher::result_type>
  __forceinline__ __device__ iterator
  get_insert(const key_type& k, aggregation_type op,
             counter_type* value_counter,
             comparison_type keys_equal = key_equal(),
             bool precomputed_hash = false,
             hash_value_type precomputed_hash_value = 0) {
    const size_type hashtbl_size = m_hashtbl_size;
    value_type* hashtbl_values = m_hashtbl_values;

    hash_value_type hash_value{0};

    // If a precomputed hash value has been passed in, then use it to
    // determine the write location of the new key
    if (true == precomputed_hash) {
      hash_value = precomputed_hash_value;
    }
    // Otherwise, compute the hash value from the new key
    else {
      hash_value = m_hf(k);
    }

    size_type current_index = hash_value % hashtbl_size;
    value_type* current_hash_bucket = &(hashtbl_values[current_index]);

    const key_type insert_key = k;

    bool insert_success = false;

    size_type counter = 0;
    while (false == insert_success) {
      // Situation #5: No slot: All slot in the hashtable is occupied by other
      // key, both get and insert fail. Return empty iterator
      if (counter++ >= hashtbl_size) {
        return end();
      }

      key_type& existing_key = current_hash_bucket->first;
      volatile mapped_type& existing_value = current_hash_bucket->second;

      // Try and set the existing_key for the current hash bucket to
      // insert_key
      const key_type old_key = atomicCAS(&existing_key, unused_key,
                                         insert_key);

      // If old_key == unused_key, the current hash bucket was empty
      // and existing_key was updated to insert_key by the atomicCAS.
      // If old_key == insert_key, this key has already been inserted.
      // In either case, perform the atomic aggregation of existing_value and
      // insert_value
      // Because the hash table is initialized with the identity value of the
      // aggregation operation, it is safe to perform the operation when the
      // existing_value still has its initial value
      // TODO: Use template specialization to make use of native atomic
      // functions
      // TODO: How to handle data types less than 32 bits?

      // Situation #1: Empty slot: this key never exist in the table, ready to
      // insert.
      if (keys_equal(unused_key, old_key)) {
        // update_existing_value(existing_value, x, op);
        existing_value = (mapped_type)(atomicAdd(value_counter, 1));
        break;

      }  // Situation #2+#3: Target slot: This slot is the slot for this key
      else if (keys_equal(insert_key, old_key)) {
        while (existing_value == m_unused_element) {
          // Situation #2: This slot is inserting by another CUDA thread and
          // the value is not yet ready, just wait
        }
        // Situation #3: This slot is already ready, get successfully and
        // return (iterator of) the value
        break;
      }
      // Situation 4: Wrong slot: This slot is occupied by other key, get
      // fail, do nothing and linear probing to next slot.

      current_index = (current_index + 1) % hashtbl_size;
      current_hash_bucket = &(hashtbl_values[current_index]);
    }

    return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
                    current_hash_bucket);
  }

  // Copies another map's contents into this one (reallocating if the other
  // table is larger), using an async memcpy on `stream`. The caller must
  // synchronize the stream before reading the table.
  int assign_async(const concurrent_unordered_map& other,
                   cudaStream_t stream = 0) {
    m_collisions = other.m_collisions;
    if (other.m_hashtbl_size <= m_hashtbl_capacity) {
      m_hashtbl_size = other.m_hashtbl_size;
    } else {
      m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
      m_hashtbl_capacity = other.m_hashtbl_size;
      m_hashtbl_size = other.m_hashtbl_size;

      m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
    }
    CUDA_RT_CALL(cudaMemcpyAsync(m_hashtbl_values, other.m_hashtbl_values,
                                 m_hashtbl_size * sizeof(value_type),
                                 cudaMemcpyDefault, stream));
    return 0;
  }

  // Re-initializes every slot to <unused_key, unused_element> on `stream`
  // (asynchronous; caller synchronizes).
  void clear_async(cudaStream_t stream = 0) {
    constexpr int block_size = 128;
    init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size, 0,
                   stream>>>(m_hashtbl_values, m_hashtbl_size, unused_key,
                             m_unused_element);
    if (count_collisions) m_collisions = 0;
  }

  unsigned long long get_num_collisions() const { return m_collisions; }

  // Debug helper: prints the first 10 slots (requires host-accessible
  // storage, e.g. managed memory).
  void print() {
    for (size_type i = 0; i < 10; ++i) {
      std::cout << i << ": " << m_hashtbl_values[i].first << ","
                << m_hashtbl_values[i].second << std::endl;
    }
  }

  // Prefetches the table storage (if managed) and this object itself to
  // `dev_id` on `stream`.
  int prefetch(const int dev_id, cudaStream_t stream = 0) {
    cudaPointerAttributes hashtbl_values_ptr_attributes;
    cudaError_t status = cudaPointerGetAttributes(
        &hashtbl_values_ptr_attributes, m_hashtbl_values);

#if CUDART_VERSION >= 10000
    if (cudaSuccess == status &&
        hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
    if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
    {
      CUDA_RT_CALL(cudaMemPrefetchAsync(m_hashtbl_values,
                                        m_hashtbl_size * sizeof(value_type),
                                        dev_id, stream));
    }
    CUDA_RT_CALL(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream));

    return 0;
  }

  // Atomically accumulates x.second into the value stored for x.first (via
  // accum_existing_value_atomic). Returns end() if the key is not present.
  template <class comparison_type = key_equal,
            typename hash_value_type = typename Hasher::result_type>
  __forceinline__ __device__ const_iterator
  accum(const value_type& x, comparison_type keys_equal = key_equal(),
        bool precomputed_hash = false,
        hash_value_type precomputed_hash_value = 0) {
    const key_type& dst_key = x.first;
    auto it = find(dst_key);

    if (it == end()) {
      return it;
    }

    value_type* dst = it.getter();

    accum_existing_value_atomic(dst->second, x);

    return it;
  }

 private:
  const hasher m_hf;
  const key_equal m_equal;

  const mapped_type m_unused_element;

  allocator_type m_allocator;

  size_type m_hashtbl_size;
  size_type m_hashtbl_capacity;
  value_type* m_hashtbl_values;

  unsigned long long m_collisions;
};

#endif  // CONCURRENT_UNORDERED_MAP_CUH
bml_add_ellpack_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_add.h"
#include "../bml_allocate.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellpack.h"
#include "bml_allocate_ellpack.h"
#include "bml_types_ellpack.h"

#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Matrix addition.
 *
 * \f$ A = \alpha A + \beta B \f$
 *
 * Rows are processed in parallel; each thread uses scratch arrays
 * (ix/jx/x) as a sparse accumulator for one row, compacting the result
 * back into A's ELLPACK storage with entries below \p threshold dropped.
 *
 * \ingroup add_group
 *
 * \param A Matrix A
 * \param B Matrix B
 * \param alpha Scalar factor multiplied by A
 * \param beta Scalar factor multiplied by B
 * \param threshold Threshold for matrix addition
 */
void TYPED_FUNC(
    bml_add_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    double alpha,
    double beta,
    double threshold)
{
    int N = A->N;
    int A_M = A->M;
    int B_M = B->M;

    int *A_nnz = A->nnz;
    int *A_index = A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    int *B_nnz = B->nnz;
    int *B_index = B->index;

    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;

    int myRank = bml_getMyRank();

#if !(defined(__IBMC__) || defined(__ibmxl__))
    int ix[N], jx[N];
    REAL_T x[N];

    memset(ix, 0, N * sizeof(int));
    memset(jx, 0, N * sizeof(int));
    /* Fix: memset's fill argument is an int; the previous code passed the
     * double literal 0.0 (implicitly converted). All-zero bytes are a valid
     * 0.0 for the REAL_T types used here. */
    memset(x, 0, N * sizeof(REAL_T));
#endif

#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz)
#else
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz) \
    firstprivate(ix, jx, x)
#endif
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {

#if defined(__IBMC__) || defined(__ibmxl__)
        /* IBM compilers reject firstprivate of VLAs; use loop-local scratch.
         * jx and x need no clearing: entries are written before being read,
         * guarded by ix. */
        int ix[N], jx[N];
        REAL_T x[N];

        memset(ix, 0, N * sizeof(int));
#endif

        int l = 0;
        if (alpha > (double) 0.0 || alpha < (double) 0.0)
            for (int jp = 0; jp < A_nnz[i]; jp++)
            {
                int k = A_index[ROWMAJOR(i, jp, N, A_M)];
                /* First touch of column k in this row: register it. */
                if (ix[k] == 0)
                {
                    x[k] = 0.0;
                    ix[k] = i + 1;
                    jx[l] = k;
                    l++;
                }
                x[k] = x[k] + alpha * A_value[ROWMAJOR(i, jp, N, A_M)];
            }

        if (beta > (double) 0.0 || beta < (double) 0.0)
            for (int jp = 0; jp < B_nnz[i]; jp++)
            {
                int k = B_index[ROWMAJOR(i, jp, N, B_M)];
                if (ix[k] == 0)
                {
                    x[k] = 0.0;
                    ix[k] = i + 1;
                    jx[l] = k;
                    l++;
                }
                x[k] = x[k] + beta * B_value[ROWMAJOR(i, jp, N, B_M)];
            }

        /* Compact the accumulated row back into A, dropping small entries.
         * (A redundant "A_nnz[i] = l;" that was immediately overwritten by
         * "A_nnz[i] = ll;" has been removed.) */
        int ll = 0;
        for (int jp = 0; jp < l; jp++)
        {
            int jind = jx[jp];
            REAL_T xTmp = x[jind];
            if (is_above_threshold(xTmp, threshold))
            {
                A_value[ROWMAJOR(i, ll, N, A_M)] = xTmp;
                A_index[ROWMAJOR(i, ll, N, A_M)] = jind;
                ll++;
            }
            /* Reset scratch entries so the arrays are clean for the next
             * row handled by this thread. */
            x[jind] = 0.0;
            ix[jind] = 0;
        }
        A_nnz[i] = ll;
    }
}

/** Matrix addition with norm.
 *
 * \f$ A = \alpha A + \beta B \f$
 *
 * In addition to the update, accumulates \f$ \sum_{ij} (A_{ij} - B_{ij})^2 \f$
 * over the locally owned rows (the y scratch array collects the unscaled
 * difference A - B per row).
 *
 * \ingroup add_group
 *
 * \param A Matrix A
 * \param B Matrix B
 * \param alpha Scalar factor multiplied by A
 * \param beta Scalar factor multiplied by B
 * \param threshold Threshold for matrix addition
 * \return The squared Frobenius-type norm of (A - B) over local rows
 */
double TYPED_FUNC(
    bml_add_norm_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    double alpha,
    double beta,
    double threshold)
{
    int N = A->N;
    int A_M = A->M;
    int B_M = B->M;

    int *A_nnz = A->nnz;
    int *A_index = A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    int *B_nnz = B->nnz;
    int *B_index = B->index;

    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;

    double trnorm = 0.0;

    int myRank = bml_getMyRank();

#if !(defined(__IBMC__) || defined(__ibmxl__))
    int ix[N], jx[N];
    REAL_T x[N];
    REAL_T y[N];

    memset(ix, 0, N * sizeof(int));
    memset(jx, 0, N * sizeof(int));
    memset(x, 0, N * sizeof(REAL_T));
    memset(y, 0, N * sizeof(REAL_T));
#endif

#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz) \
    reduction(+:trnorm)
#else
#pragma omp parallel for \
    shared(N, A_M, B_M, myRank) \
    shared(A_index, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax) \
    shared(B_index, B_value, B_nnz) \
    firstprivate(ix, jx, x, y) \
    reduction(+:trnorm)
#endif
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {

#if defined(__IBMC__) || defined(__ibmxl__)
        int ix[N], jx[N];
        REAL_T x[N];
        REAL_T y[N];

        memset(ix, 0, N * sizeof(int));
#endif

        int l = 0;
        for (int jp = 0; jp < A_nnz[i]; jp++)
        {
            int ind = ROWMAJOR(i, jp, N, A_M);
            int k = A_index[ind];
            if (ix[k] == 0)
            {
                x[k] = 0.0;
                ix[k] = i + 1;
                y[k] = 0.0;
                //A_index[ROWMAJOR(i, l, N, A_M)] = k;
                jx[l] = k;
                l++;
            }
            x[k] = x[k] + alpha * A_value[ind];
            /* y accumulates the unscaled A entry; B is subtracted below. */
            y[k] = y[k] + A_value[ind];
        }

        for (int jp = 0; jp < B_nnz[i]; jp++)
        {
            int ind = ROWMAJOR(i, jp, N, B_M);
            int k = B_index[ind];
            if (ix[k] == 0)
            {
                x[k] = 0.0;
                ix[k] = i + 1;
                y[k] = 0.0;
                jx[l] = k;
                l++;
            }
            x[k] = x[k] + beta * B_value[ind];
            y[k] = y[k] - B_value[ind];
        }

        int ll = 0;
        for (int jp = 0; jp < l; jp++)
        {
            int jind = jx[jp];
            REAL_T xTmp = x[jind];
            trnorm += y[jind] * y[jind];
            if (is_above_threshold(xTmp, threshold))
            {
                A_value[ROWMAJOR(i, ll, N, A_M)] = xTmp;
                A_index[ROWMAJOR(i, ll, N, A_M)] = jind;
                ll++;
            }
            x[jind] = 0.0;
            ix[jind] = 0;
            y[jind] = 0.0;
        }
        A_nnz[i] = ll;
    }

    return trnorm;
}

/** Matrix addition.
 *
 * A = A + beta * I
 *
 * \ingroup add_group
 *
 * \param A Matrix A
 * \param beta Scalar factor multiplied by I
 * \param threshold Threshold for matrix addition
 */
void TYPED_FUNC(
    bml_add_identity_ellpack) (
    bml_matrix_ellpack_t * A,
    double beta,
    double threshold)
{
    /* alpha = 1 keeps A unscaled while the identity contribution is added. */
    double alpha = 1.0;

    bml_matrix_ellpack_t *Id =
        TYPED_FUNC(bml_identity_matrix_ellpack) (A->N, A->M,
                                                 A->distribution_mode);

    TYPED_FUNC(bml_add_ellpack) (A, Id, alpha, beta, threshold);

    bml_deallocate_ellpack(Id);
}

/** Matrix addition.
 *
 * A = alpha * A + beta * I
 *
 * \ingroup add_group
 *
 * \param A Matrix A
 * \param alpha Scalar factor multiplied by A
 * \param beta Scalar factor multiplied by I
 * \param threshold Threshold for matrix addition
 */
void TYPED_FUNC(
    bml_scale_add_identity_ellpack) (
    bml_matrix_ellpack_t * A,
    double alpha,
    double beta,
    double threshold)
{
    bml_matrix_ellpack_t *Id =
        TYPED_FUNC(bml_identity_matrix_ellpack) (A->N, A->M,
                                                 A->distribution_mode);

    TYPED_FUNC(bml_add_ellpack) (A, Id, alpha, beta, threshold);

    bml_deallocate_ellpack(Id);
}
octnode.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <omp.h> #include "octnode.h" #include "brute_force.h" #define LEAF_SIZE 8 const short int child_signs[8][3] = {{-1, -1, -1},{-1, -1, 1}, {-1, 1, -1}, {-1, 1, 1}, {1, -1, -1}, {1, -1, 1}, {1, 1, -1}, {1, 1, 1}}; octnode* octnode_make(double w, double x, double y, double z) { octnode* nd = malloc(sizeof(octnode)); nd->w = w; nd->m = 0.; nd->r_x = x; nd->r_y = y; nd->r_z = z; nd->R_x = 0.; nd->R_y = 0.; nd->R_z = 0.; nd->leaf_cap = LEAF_SIZE; nd->pnts = malloc(LEAF_SIZE * sizeof(particle*)); for (int i = 0; i < LEAF_SIZE; i++) nd->pnts[i] = NULL; for (int i = 0; i < 8; i++) nd->children[i] = NULL; return nd; } void octnode_free(octnode* nd) { if (nd == NULL) return; for (int i = 0; i < 8; i++) { octnode_free(nd->children[i]); } if (nd->leaf_cap > 0) free(nd->pnts); free(nd); } void octnode_print(octnode* nd) { printf("<octnode: %f, %f, %f [%f, %d]>\n", nd->r_x, nd->r_y, nd->r_z, nd->w, nd->leaf_cap); } void octree_print(octnode* root) { octnode_print(root); for (int i = 0; i < 8; i++) { if (root->children[i] == NULL) continue; octree_print(root->children[i]); } } void octnode_insert_point(octnode* nd, particle* pnt) { if (nd->leaf_cap > 0) { nd->pnts[LEAF_SIZE - nd->leaf_cap] = pnt; nd->leaf_cap -= 1; nd->m += pnt->m; return; } else if (nd->leaf_cap == 0) { int n = LEAF_SIZE - nd->leaf_cap; for (int i = 0; i < n; i++) { int cid = octnode_get_child_id(nd, nd->pnts[i]->x, nd->pnts[i]->y, nd->pnts[i]->z); if (nd->children[cid] == NULL) { octnode_make_child(nd, cid); } octnode_insert_point(nd->children[cid], nd->pnts[i]); nd->R_x += nd->pnts[i]->x * nd->pnts[i]->m; nd->R_y += nd->pnts[i]->y * nd->pnts[i]->m; nd->R_z += nd->pnts[i]->z * nd->pnts[i]->m; } double ff = 1. 
/ nd->m; nd->R_x *= ff; nd->R_y *= ff; nd->R_z *= ff; nd->leaf_cap -= 1; free(nd->pnts); } if (nd->leaf_cap == -1) { double pmass = pnt->m; double k1 = pmass / (nd->m + pmass); double k2 = nd->m / (nd->m + pmass); nd->R_x *= k2; nd->R_y *= k2; nd->R_z *= k2; nd->R_x += pnt->x * k1; nd->R_y += pnt->y * k1; nd->R_z += pnt->z * k1; nd->m += pmass; int cid = octnode_get_child_id(nd, pnt->x, pnt->y, pnt->z); if (nd->children[cid] == NULL) { octnode_make_child(nd, cid); } octnode_insert_point(nd->children[cid], pnt); } } int octnode_get_child_id(octnode* nd, double x, double y, double z) { int idx = (x >= nd->r_x) << 2 | (y >= nd->r_y) << 1 | (z >= nd->r_z); return idx; } void octnode_make_child(octnode* nd, int idx) { double k = 0.25 * nd->w; double x = nd->r_x + k * child_signs[idx][0]; double y = nd->r_y + k * child_signs[idx][1]; double z = nd->r_z + k * child_signs[idx][2]; octnode* child = octnode_make(0.5 * nd->w, x, y, z); nd->children[idx] = child; } void octree_build(octnode* root, particle** pcont, int n) { for (int i = 0; i < n; ++i) { octnode_insert_point(root, pcont[i]); } } void octree_build_omp(octnode* root, particle** pcont, int n) { if (n <= LEAF_SIZE) { octree_build(root, pcont, n); return; } int max_threads = omp_get_max_threads(); int n_cores = (max_threads <= 8) ? 
max_threads : 8; int* child_ids = malloc(n * sizeof(int)); int** thread_assign = malloc(n_cores * sizeof(int*)); int* thread_assign_cnt = malloc(n_cores * sizeof(int)); for (int i = 0; i < n_cores; i++) { thread_assign[i] = malloc(n * sizeof(int)); thread_assign_cnt[i] = 0; } for (int i = 0; i < n; i++) { int cid = octnode_get_child_id(root, pcont[i]->x, pcont[i]->y, pcont[i]->z); if (root->children[cid] == NULL) { octnode_make_child(root, cid); } child_ids[i] = cid; int tid = cid % n_cores; thread_assign[tid][thread_assign_cnt[tid]] = i; thread_assign_cnt[tid] += 1; } #pragma omp parallel num_threads(n_cores) { int tid = omp_get_thread_num(); int k = thread_assign_cnt[tid]; int* t_arr = thread_assign[tid]; for (int i = 0; i < k; i++) { int pid = t_arr[i]; octnode_insert_point(root->children[child_ids[pid]], pcont[pid]); } } root->leaf_cap = -1; for (int i = 0; i < 8; i++) { if (root->children[i] == NULL) continue; root->m += root->children[i]->m; root->R_x += root->children[i]->R_x * root->children[i]->m; root->R_y += root->children[i]->R_y * root->children[i]->m; root->R_z += root->children[i]->R_z * root->children[i]->m; } double ff = 1. 
/ root->m; root->R_x *= ff; root->R_y *= ff; root->R_z *= ff; free(child_ids); free(thread_assign_cnt); for (int i = 0; i < n_cores; i++) { free(thread_assign[i]); } free(thread_assign); } void octree_calc_accs(octnode* nd, particle** psub, int k, params* par) { if (k == 0) return; double d, f, mac, dx, dy, dz, d_squared, mac_squared; int cnt; if (nd->leaf_cap >= 0) { int n = LEAF_SIZE - nd->leaf_cap; for (int i = 0; i < k; i++) { for (int j = 0; j < n; j++) { calc_accs_particles(psub[i], nd->pnts[j], par); } } return; } cnt = 0; mac = nd->w / par->theta; mac_squared = mac * mac; particle** new_psub = malloc(k * sizeof(particle*)); for (int i = 0; i < k; ++i) { particle* part = psub[i]; dx = nd->R_x - part->x; dy = nd->R_y - part->y; dz = nd->R_z - part->z; d_squared = dx * dx + dy * dy + dz * dz; // case 1: MAC satisfied if (d_squared > mac_squared) { d = sqrt(d_squared); f = par->G * nd->m / (d_squared * d + par->eps); part->a_x += f * dx; part->a_y += f * dy; part->a_z += f * dz; } // case 2: MAC not satisfied else { new_psub[cnt] = part; cnt++; } } for (int i = 0; i < 8; ++i) { if (nd->children[i] == NULL) continue; octree_calc_accs(nd->children[i], new_psub, cnt, par); } free(new_psub); } void octree_calc_accs_omp(octnode* nd, particle** psub, int k, params* par) { if (k <= LEAF_SIZE) { calc_accs(psub, k, par); return; } int n_cores = omp_get_max_threads(); int chunk_size = k / n_cores; # pragma omp parallel num_threads(n_cores) { int tid, start_ind, end_ind, m; tid = omp_get_thread_num(); start_ind = tid * chunk_size; end_ind = (tid < n_cores - 1) ? 
(tid + 1) * chunk_size : k; m = end_ind - start_ind; particle** new_psub = make_slice(psub, start_ind, end_ind); octree_calc_accs(nd, new_psub, m, par); free(new_psub); } } double* calc_accs_wrap(int n, double* points, double* masses, double G, double eps, double theta, double root_width, double root_x, double root_y, double root_z) { particle** pcont = make_from_arrays(n, points, masses); octnode* root = octnode_make(root_width, root_x, root_y, root_z); params* par = params_make(G, eps, theta); octree_build_omp(root, pcont, n); octree_calc_accs_omp(root, pcont, n, par); double* accs = accs_from_pcont(pcont, n); free(par); pcont_free(pcont, n); octnode_free(root); return accs; }
omp_taskloop_taskwait.c
// RUN: %libomp-compile-and-run
// Regression test: a nogroup taskloop has no implicit group wait, so an
// explicit taskwait must complete all its tasks before the result is read.
#include <stdio.h>
#include <omp.h>

int main()
{
  enum { ITERS = 500 };  /* iterations spread over the taskloop */
  enum { SIZE = 5 };     /* taskloop grainsize */
  int err = 0;

#pragma omp parallel num_threads(2) reduction(+:err)
  {
    int count = 0;
    int i;
    /* nogroup: do not wait at the end of the taskloop construct */
#pragma omp taskloop grainsize(SIZE) shared(count) nogroup
    for (i = 0; i < ITERS; i++) {
#pragma omp atomic
      ++count;
    }
    /* explicit synchronization point for all generated tasks */
#pragma omp taskwait
    printf("%d\n", count);
    if (count != ITERS)
      ++err;
  } // end of parallel

  if (err != 0) {
    printf("failed, err = %d\n", err);
    return 1;
  }
  printf("passed\n");
  return 0;
}
convolution_1x1_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch) { // interleave // src = inch-outch // dst = 4b-8a-inch/8a-outch/4 kernel_tm_pack8to4.create(4 * 8, inch / 8, outch / 8 + (outch % 8) / 4, (size_t)2u * 2, 2); int p = 0; for (; p + 7 < outch; p += 8) { const float* k0 = (const float*)kernel + (p + 0) * inch; const float* k1 = (const float*)kernel + (p + 1) * inch; const float* k2 = (const float*)kernel + (p + 2) * inch; const float* k3 = (const float*)kernel + (p + 3) * inch; const float* k4 = (const float*)kernel + (p + 4) * inch; const float* k5 = (const float*)kernel + (p + 5) * inch; const float* k6 = (const float*)kernel + (p + 6) * inch; const float* k7 = (const float*)kernel + (p + 7) * inch; __fp16* g0 = kernel_tm_pack8to4.channel(p / 8); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g0[0] = (__fp16)k0[i]; g0[1] = (__fp16)k1[i]; g0[2] = (__fp16)k2[i]; g0[3] = (__fp16)k3[i]; g0[4] = (__fp16)k4[i]; g0[5] = (__fp16)k5[i]; g0[6] = (__fp16)k6[i]; g0[7] = (__fp16)k7[i]; g0 += 8; } k0 += 8; k1 += 8; k2 += 8; k3 += 8; k4 += 8; k5 += 8; k6 += 8; k7 += 8; } } for (; p + 3 < outch; p += 4) { const float* k0 = (const float*)kernel + (p + 0) * inch; const float* k1 = 
(const float*)kernel + (p + 1) * inch; const float* k2 = (const float*)kernel + (p + 2) * inch; const float* k3 = (const float*)kernel + (p + 3) * inch; __fp16* g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g0[0] = (__fp16)k0[i]; g0[1] = (__fp16)k1[i]; g0[2] = (__fp16)k2[i]; g0[3] = (__fp16)k3[i]; g0 += 4; } k0 += 8; k1 += 8; k2 += 8; k3 += 8; } } } static void conv1x1s1_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const __fp16* bias = _bias; // interleave Mat tmp; if (size >= 8) tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator); else if (size >= 4) tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator); else // if (size >= 1) tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator); { int nn_size; int remain_size_start = 0; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, 
v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += bottom_blob.cstep * 8; } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* outptr0 = top_blob.channel(p); __fp16* outptr1 = top_blob.channel(p + 1); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 4 : zeros; float16x8_t _bias0 = vld1q_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "mov v28.16b, %10.16b \n" "mov v29.16b, %10.16b \n" "mov v30.16b, %10.16b \n" "mov v31.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, 
v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = 
kernel.channel(p / 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : 
"cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2); float16x8_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(tmpptr); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; tmpptr += 8; } vst1_f16(outptr0, vget_low_f16(_sum0)); vst1_f16(outptr1, vget_high_f16(_sum0)); outptr0 += 4; outptr1 += 4; } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 4 : zeros; float16x4_t _bias0 = vld1_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "mov v28.16b, %8.16b \n" "mov v29.16b, %8.16b \n" "mov v30.16b, %8.16b \n" "mov v31.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, 
v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla 
v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); float16x4_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(tmpptr); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, 
_r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; tmpptr += 8; } vst1_f16(outptr0, _sum0); outptr0 += 4; } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? bias[p] : 0.f; // // __fp16* outptr0 = out0; // // for (int i=0; i<size; i++) // { // float sum = bias0; // // const __fp16* kptr = _kernel.channel(p); // // for (int q=0; q<inch; q++) // { // const __fp16* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s2_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); float16x8_t _v2 = vld1q_f16(r0 + 32); float16x8_t _v3 = vld1q_f16(r0 + 48); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); vst1q_f16(outptr + 16, _v2); vst1q_f16(outptr + 24, _v3); r0 += 64; outptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, 
_v1); r0 += 32; outptr += 16; } for (; j < outw; j++) { float16x8_t _v = vld1q_f16(r0); vst1q_f16(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_pack8to4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
noWait.c
// OpenMP NoWait Example
//
// Demonstrates "#pragma omp for nowait": threads finishing the first loop
// proceed to the second loop without waiting for the others.
//
// Fixes over the original:
//  - n was hard-coded to 0, so neither loop ever executed; it is now read
//    from argv[1] (default 16).
//  - start/middle/end were shared doubles written concurrently by every
//    thread inside the parallel region (a data race that also made the
//    reported timings meaningless).  Timestamps are now per-thread, and the
//    slowest thread's durations are reduced under a critical section.

// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// Main
int main( int argc, char** argv )
{
    int n = (argc > 1) ? atoi(argv[1]) : 16;  // Number of Iterations
    if (n < 0) n = 0;                          // guard bad input

    double for1  = 0.0;  // slowest thread's For Loop 1 time
    double for2  = 0.0;  // slowest thread's For Loop 2 time
    double total = 0.0;  // slowest thread's total time

    // Parallel Region Start
    #pragma omp parallel shared( n, for1, for2, total )
    {
        double start = omp_get_wtime( );  // per-thread start time

        #pragma omp for nowait  // Parallelize For Loop - Don't Wait for All to End
        for( int i = 0; i < n; i++ ) {  // Iterate Through
            printf( "Thread %d of %d - Iteration %d\n",
                    omp_get_thread_num( ), omp_get_max_threads( ), i );
        }

        double middle = omp_get_wtime( );  // per-thread middle time

        #pragma omp for nowait  // Parallelize For Loop - Don't Wait for All to End
        for( int i = 0; i < n; i++ ) {  // Iterate Through
            printf( "Thread %d of %d - Iteration %d\n",
                    omp_get_thread_num( ), omp_get_max_threads( ), i );
        }

        double end = omp_get_wtime( );  // per-thread end time

        // Keep the slowest thread's timings (one writer at a time).
        #pragma omp critical
        {
            if( middle - start > for1 )  for1  = middle - start;
            if( end - middle   > for2 )  for2  = end - middle;
            if( end - start    > total ) total = end - start;
        }
    }

    // Display Time
    printf( "For Loop 1: %0.9lf\n", for1 );
    printf( "For Loop 2: %0.9lf\n", for2 );
    printf( "Total Time: %0.9lf\n", total );

    return 0;
}
// End noWait.c - EWG SDG