source
stringlengths
3
92
c
stringlengths
26
2.25M
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_binop__bor_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bor_uint16 // A.*B function (eWiseMult): GB_AemultB__bor_uint16 // A*D function (colscale): GB_AxD__bor_uint16 // D*A function (rowscale): GB_DxB__bor_uint16 // C+=B function (dense accum): GB_Cdense_accumB__bor_uint16 // C+=b function (dense accum): GB_Cdense_accumb__bor_uint16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_uint16 // C=scalar+B GB_bind1st__bor_uint16 // C=scalar+B' GB_bind1st_tran__bor_uint16 // C=A+scalar GB_bind2nd__bor_uint16 // C=A'+scalar GB_bind2nd_tran__bor_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x) | (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_UINT16 || GxB_NO_BOR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bor_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bor_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bor_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__bor_uint16 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__bor_uint16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bor_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bor_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bor_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t bij = Bx [p] ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bor_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ 
uint16_t aij = Ax [pA] ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB_bind1st_tran__bor_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB_bind2nd_tran__bor_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
1.race5.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for schedule(guided) for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i - 1][j - 1]; } // CHECK: Data Race detected // END
Bvh.h
#ifndef GAME_ENGINE_BVH_H #define GAME_ENGINE_BVH_H #include "CoreLib/Basic.h" #include "CoreLib/Graphics/BBox.h" #include "Ray.h" namespace GameEngine { using namespace CoreLib::Basic; const int nBuckets = 16; class BvhNode { public: CoreLib::Graphics::BBox Bounds; unsigned int Axis : 2; unsigned int SkipBBoxTest : 1; int ElementCount : 29; union { int ElementId; int ChildOffset; }; inline bool GetIsLeaf() { return ElementCount != 0; } inline int GetElementCount() { return ElementCount; } }; template<typename T> class BvhNode_Build { public: CoreLib::Graphics::BBox Bounds; int Axis; T** Elements; int ElementCount; BvhNode_Build* Children[2]; void AllocElements(int count) { Elements = (T**)malloc(count * sizeof(T*)); ElementCount = count; } void FreeElements() { if (Elements) { free(Elements); Elements = 0; } } BvhNode_Build() { Children[0] = 0; Children[1] = 0; Axis = 0; ElementCount = 0; Elements = 0; } ~BvhNode_Build() { if (Children[0]) delete Children[0]; if (Children[1]) delete Children[1]; FreeElements(); } }; template<typename T> class Bvh_Build { public: CoreLib::RefPtr<BvhNode_Build<T>> Root; int ElementListSize; int NodeCount = 0; }; template<typename T> class Bvh { private: int FlattenNodes(BvhNode_Build<T> * node) { int id = Nodes.Count(); BvhNode n; n.Axis = node->Axis; n.Bounds = node->Bounds; if (node->Elements == 0) n.ElementCount = 0; else n.ElementCount = node->ElementCount; n.SkipBBoxTest = 0; Nodes.Add(n); if (node->Elements == 0) { FlattenNodes(node->Children[0]); Nodes[id].ChildOffset = FlattenNodes(node->Children[1]) - id; } else { Nodes[id].ElementId = Elements.Count(); for (int i = 0; i < node->ElementCount; i++) Elements.Add(*node->Elements[i]); } return id; } public: CoreLib::List<BvhNode> Nodes; CoreLib::List<T> Elements; void FromBuild(Bvh_Build<T> &bvh) { Nodes.Clear(); Elements.Clear(); Nodes.Reserve((int)bvh.NodeCount); Elements.Reserve((int)bvh.ElementListSize); FlattenNodes(bvh.Root.operator->()); } }; template<typename T> class 
BuildData { public: T * Element; CoreLib::Graphics::BBox Bounds; VectorMath::Vec3 Center; }; inline float SurfaceArea(CoreLib::Graphics::BBox & box) { return ((box.xMax - box.xMin)*(box.yMax - box.yMin) + (box.xMax - box.xMin)*(box.zMax - box.zMax) + (box.yMax - box.yMin)*(box.zMax - box.zMin))*2.0f; } struct BucketInfo { int count; CoreLib::Graphics::BBox bounds; BucketInfo() { count = 0; bounds.Init(); } }; template<typename T, typename CostEvaluator> BvhNode_Build<T> * ConstructBvhNodeNonRec(BuildData<T>* elements, int elementCount, int & elementListSize, int & nodeCount, CostEvaluator & eval) { struct BvhJob { BvhNode_Build<T> ** result; BuildData<T>* elements; int elementCount; BvhJob() {} BvhJob(BvhNode_Build<T> ** result, BuildData<T>* elements, int elementCount) { this->elements = elements; this->elementCount = elementCount; this->result = result; } }; const int stackSize = 256; BvhJob stack[stackSize]; int stackPtr = 0; auto pushJob = [&](BvhNode_Build<T> ** result, BuildData<T>* elements, int elementCount) { BvhJob job(result, elements, elementCount); if (stackPtr < stackSize) stack[stackPtr++] = job; else throw "stack overflow"; }; auto popJob = [&]()->BvhJob { if (stackPtr) return stack[--stackPtr]; else throw "stack empty"; }; BvhNode_Build<T> * rs = 0; nodeCount = 0; elementListSize = 0; BvhJob job(&rs, elements, elementCount); while (true) { BvhNode_Build<T> * node = new BvhNode_Build<T>(); nodeCount++; (*job.result) = node; BuildData<T>* jElements = job.elements; int jElementCount = job.elementCount; if (jElementCount == 0) { printf("elementCount = 0 !"); throw 0; } if (jElementCount == 1 || stackPtr == stackSize) { node->Bounds = jElements->Bounds; node->AllocElements((int)jElementCount); for (int i = 0; i < (int)jElementCount; i++) { node->Elements[i] = jElements[i].Element; } elementListSize += jElementCount; if (!stackPtr) break; else job = popJob(); continue; } else { CoreLib::Graphics::BBox centroidBounds; CoreLib::Graphics::BBox bbox; 
centroidBounds.Init(); bbox.Init(); for (int i = 0; i < jElementCount; i++) { centroidBounds.Union(jElements[i].Center); bbox.Union(jElements[i].Bounds); } node->Bounds = bbox; int dim = centroidBounds.MaxDimension(); if (centroidBounds.Min[dim] == centroidBounds.Max[dim]) { node->Bounds = bbox; node->AllocElements((int)jElementCount); for (int i = 0; i < (int)jElementCount; i++) { node->Elements[i] = jElements[i].Element; } elementListSize += jElementCount; if (!stackPtr) break; else job = popJob(); continue; } BucketInfo buckets[nBuckets]; for (int i = 0; i < jElementCount; i++) { int b = (int)(nBuckets * ((jElements[i].Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; buckets[b].count++; buckets[b].bounds.Union(jElements[i].Bounds); } float minCost = FLT_MAX; int minCostSplit = 0; CoreLib::Graphics::BBox bounds1[nBuckets - 1]; bounds1[nBuckets - 2] = buckets[nBuckets - 1].bounds; for (int i = nBuckets - 3; i >= 0; i--) { bounds1[i].Init(); bounds1[i].Union(buckets[i + 1].bounds); bounds1[i].Union(bounds1[i + 1]); } CoreLib::Graphics::BBox b0; b0.Init(); int count0 = 0; for (int i = 0; i < nBuckets - 1; i++) { b0.Union(buckets[i].bounds); count0 += buckets[i].count; int count1 = (int)jElementCount - count0; float cost = eval.EvalCost(count0, SurfaceArea(b0), count1, SurfaceArea(bounds1[i]), SurfaceArea(bbox)); if (cost < minCost) { minCost = cost; minCostSplit = i; } } if (jElementCount > CostEvaluator::ElementsPerNode || minCost < jElementCount) { BuildData<T> *pmid = std::partition(jElements, jElements + jElementCount, [&](const BuildData<T> &p) { int b = (int)(nBuckets * ((p.Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; return b <= minCostSplit; }); node->Axis = dim; job = BvhJob(node->Children, jElements, (int)(pmid - jElements)); pushJob(node->Children + 1, pmid, (int)(jElements + 
jElementCount - pmid)); } else { node->AllocElements((int)jElementCount); node->Bounds = bbox; for (int i = 0; i < (int)jElementCount; i++) { node->Elements[i] = jElements[i].Element; } elementListSize += jElementCount; if (!stackPtr) break; else job = popJob(); continue; } } } return rs; } template<typename T, typename CostEvaluator> BvhNode_Build<T> * ConstructBvhNode(Bvh_Build<T> & tree, BuildData<T>* elements, int elementCount, int & elementListSize, int & nodeCount, CostEvaluator & eval, int depth) { BvhNode_Build<T> * node = new BvhNode_Build<T>(); nodeCount = 1; elementListSize = 0; if (elementCount == 1 || depth == 61) { node->Bounds = elements->Bounds; node->AllocElements(1); node->Elements[0] = elements->Element; elementListSize = 1; return node; } else { CoreLib::Graphics::BBox centroidBounds; CoreLib::Graphics::BBox bbox; centroidBounds.Init(); bbox.Init(); for (int i = 0; i < elementCount; i++) { centroidBounds.Union(elements[i].Center); bbox.Union(elements[i].Bounds); } node->Bounds = bbox; int dim = centroidBounds.MaxDimension(); if (centroidBounds.Min[dim] == centroidBounds.Max[dim]) { node->Bounds = bbox; node->AllocElements((int)elementCount); for (int i = 0; i < (int)elementCount; i++) { node->Elements[i] = elements[i].Element; } elementListSize = elementCount; return node; } BucketInfo buckets[nBuckets]; if (elementCount > (2 << 12)) { const int processorCount = 16; BucketInfo buckets_proc[processorCount][nBuckets]; int blockSize = (int)(elementCount / processorCount); #pragma omp parallel for for (int procId = 0; procId < processorCount; procId++) { int end; if (procId == processorCount - 1) end = (int)elementCount; else end = (procId + 1)*blockSize; for (int i = procId * blockSize; i < end; i++) { int b = (int)(nBuckets * ((elements[i].Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; buckets_proc[procId][b].count++; 
buckets_proc[procId][b].bounds.Union(elements[i].Bounds); } } for (int i = 0; i < nBuckets; i++) { for (int j = 0; j < processorCount; j++) { buckets[i].count += buckets_proc[j][i].count; buckets[i].bounds.Union(buckets_proc[j][i].bounds); } } } else { for (int i = 0; i < elementCount; i++) { int b = (int)(nBuckets * ((elements[i].Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; buckets[b].count++; buckets[b].bounds.Union(elements[i].Bounds); } } CoreLib::Graphics::BBox bounds1[nBuckets - 1]; bounds1[nBuckets - 2] = buckets[nBuckets - 1].bounds; for (int i = nBuckets - 3; i >= 0; i--) { bounds1[i].Init(); bounds1[i].Union(buckets[i + 1].bounds); bounds1[i].Union(bounds1[i + 1]); } CoreLib::Graphics::BBox b0; b0.Init(); int count0 = 0; float minCost = FLT_MAX; int minCostSplit = 0; for (int i = 0; i < nBuckets - 1; i++) { b0.Union(buckets[i].bounds); count0 += buckets[i].count; int count1 = (int)elementCount - count0; float cost = eval.EvalCost(count0, SurfaceArea(b0), count1, SurfaceArea(bounds1[i]), SurfaceArea(bbox)); if (cost < minCost) { minCost = cost; minCostSplit = i; } } if (elementCount > CostEvaluator::ElementsPerNode || minCost < elementCount) { BuildData<T> *pmid = std::partition(elements, elements + elementCount, [&](const BuildData<T> &p) { int b = (int)(nBuckets * ((p.Center[dim] - centroidBounds.Min[dim]) / (centroidBounds.Max[dim] - centroidBounds.Min[dim]))); if (b == nBuckets) b = nBuckets - 1; return b <= minCostSplit; }); node->Axis = dim; int listSize1, listSize2; int nodeCount1, nodeCount2; if (depth > 8) { node->Children[0] = ConstructBvhNodeNonRec<T, CostEvaluator>(elements, (int)(pmid - elements), listSize1, nodeCount1, eval); node->Children[1] = ConstructBvhNodeNonRec<T, CostEvaluator>(pmid, (int)(elements + elementCount - pmid), listSize2, nodeCount2, eval); } else { #pragma omp parallel sections { #pragma omp section node->Children[0] = 
ConstructBvhNode<T, CostEvaluator>(tree, elements, (int)(pmid - elements), listSize1, nodeCount1, eval, depth + 1); #pragma omp section node->Children[1] = ConstructBvhNode<T, CostEvaluator>(tree, pmid, (int)(elements + elementCount - pmid), listSize2, nodeCount2, eval, depth + 1); } } node->ElementCount = (int)(elementListSize = listSize1 + listSize2); nodeCount += nodeCount1 + nodeCount2; } else { node->AllocElements((int)elementCount); node->Bounds = bbox; for (int i = 0; i < (int)elementCount; i++) { node->Elements[i] = elements[i].Element; } elementListSize = elementCount; } return node; } } template<typename T, typename CostEvaluator> void ConstructBvh(Bvh_Build<T> & tree, BuildData<T>* elements, int elementCount, CostEvaluator & eval) { tree.Root = ConstructBvhNode<T, CostEvaluator>(tree, elements, elementCount, tree.ElementListSize, tree.NodeCount, eval, 0); } template<typename T, typename Tracer, typename THit, bool pred> bool TraverseBvh(const Tracer & tracer, THit& rs, Bvh<T> & tree, const Ray & ray, VectorMath::Vec3 rcpDir) { bool hit = false; float tmax = ray.tMax; int dirIsNeg[3] = { rcpDir.x < 0, rcpDir.y < 0, rcpDir.z < 0 }; BvhNode* node = tree.Nodes.Buffer(); int todoOffset = 0; BvhNode* todo[256]; auto traceRay = ray; while (true) { float t1, t2; if (RayBBoxIntersection_RcpDir(node->Bounds, ray.Origin, rcpDir, t1, t2) && t1 < traceRay.tMax) { if (node->ElementCount > 0) { THit inter; for (int i = node->ElementId; i < node->ElementId + node->ElementCount; i++) { if (tracer.Trace(inter, tree.Elements[i], traceRay, tmax)) { if (pred) return true; if (tmax <= traceRay.tMax) { rs = inter; traceRay.tMax = tmax; hit = true; } } } if (todoOffset == 0) break; node = todo[--todoOffset]; } else { if (ray.Origin[node->Axis] > (node + 1)->Bounds.Max[node->Axis]) { todo[todoOffset++] = node + 1; node = node + node->ChildOffset; } else { todo[todoOffset++] = node + node->ChildOffset; node = node + 1; } } } else { if (todoOffset == 0) break; node = 
todo[--todoOffset]; } } return hit; } } #endif
25_omp_stack.c
// clang-format off // RUN: %run %s --omp 2>&1 | FileCheck %s --check-prefix=CHECK-TSAN // RUN: %run %s --omp 2>&1 | FileCheck %s // REQUIRES: openmp // clang-format on void f() { char c[4]; double d = 5; } int main(int argc, char** argv) { // CHECK: [Trace] TypeART Runtime Trace #pragma omp parallel sections { #pragma omp section f(); #pragma omp section f(); } // CHECK-TSAN-NOT: ThreadSanitizer // CHECK-NOT: Error // CHECK: [Trace] Free 0x{{.*}} 0 int8 1 4 // CHECK-DAG: [Trace] Free 0x{{.*}} 6 double 8 1 // CHECK-DAG: [Trace] Free 0x{{.*}} 0 int8 1 4 // CHECK-DAG: [Trace] Free 0x{{.*}} 6 double 8 1 return 0; }
A069675_extra.h
// Note the omp parallel for requires -fopenmp at compile time // yields ~10x speedup. #include <gmpxx.h> #include <atomic> #include <cassert> #include <cmath> #include <cstdio> #include <iostream> #include <fstream> #include "A069675_config.h" using namespace std; void AssertDivisible(int a, int d, int b, long p) { mpz_class t = 10; mpz_class mod = p; mpz_powm_ui(t.get_mpz_t(), t.get_mpz_t(), d, mod.get_mpz_t()); t *= a; t += b; t %= mod; if (t != 0) { cout << "WHAT: " << a << " " << d << " " << b << " " << p << endl; } assert(t == 0); } int FilterSimple() { long filtered = 0; // Filter divisible by 2 and 5 mods for (int a = 1; a <= 9; a += 1) { for (int test_d = 1; test_d <= MAX_DIGITS; test_d += 1) { is_prime[test_d][a][2] = 2; is_prime[test_d][a][4] = 2; is_prime[test_d][a][6] = 2; is_prime[test_d][a][8] = 2; is_prime[test_d][a][5] = 5; filtered += 5; } } // Filter divisible by 3 mods. for (int a = 1; a <= 9; a += 1) { for (int b = 1; b <= 9; b += 2) { if ((a + b) % 3 == 0) { for (int test_d = 1; test_d <= MAX_DIGITS; test_d += 1) { // assert a * pow(10, test_d, 3) + b % 3 == 0 if (is_prime[test_d][a][b] == 0) { is_prime[test_d][a][b] = 3; filtered++; } } } } } { // Filter simple, divisible by 7. // 10 ** d % 7 = 1,3,2,6,4,5, (repeats) int ten_d_mod_seven[] = {1,3,2,6,4,5}; for (int test_d = 1; test_d <= MAX_DIGITS; test_d += 1) { int d_mod = ten_d_mod_seven[test_d % 6]; for (int a = 1; a <= 9; a += 1) { for (int b = 1; b <= 9; b += 2) { if ((a * d_mod + b) % 7 == 0 && is_prime[test_d][a][b] == 0) { is_prime[test_d][a][b] = 7; filtered++; } } } } } for (int test_d = 3; test_d <= MAX_DIGITS; test_d += 1) { // See logic on Fermat primes: // a^b + 1 can only be prime if b has no odd divisors // => b is a power of two. bool is_power_two = (test_d & (test_d - 1)) == 0; if (!is_power_two) { is_prime[test_d][1][1] = -2; // Not prime but factor is unknown. 
filtered++; } } return filtered; } void FilterStats() { int total = 0; int total_to_test = 0; int filtered = 0; int filtered_trivial = 0; for (int d = START_DIGIT; d <= MAX_DIGITS; d++) { for (int a = 1; a <= 9; a++) { for (int b = 1; b <= 9; b++) { long status = is_prime[d][a][b]; assert(status >= 0 || status == -2); total += 1; total_to_test += status == 0; filtered_trivial += (status >= 2 && status <= 7); filtered += status != 0; } } } cout << endl; printf("%d total, %d trivially filtered, %d total filtered\n", total, filtered_trivial, filtered); printf("\t%d to test (%.3f non-trivial remaining)\n", total_to_test, 1.0 * total_to_test / (total - filtered_trivial)); } void VerifyFilter() { atomic<long> verified(0); atomic<long> negative(0); // No longer tests that things weren't missed, only that all entries divide. #pragma omp parallel for schedule( dynamic ) for (int d = START_DIGIT; d <= MAX_DIGITS; d++) { if (d <= 10) { continue; } long v = 0; long n = 0; mpz_class ten = 10; mpz_pow_ui(ten.get_mpz_t(), ten.get_mpz_t(), d); for (long a = 1; a <= 9; a++) { for (long b = 1; b <= 9; b++) { // TODO: Deal with a * pow_ten_mod_p * b == p long status = is_prime[d][a][b]; long p = status; if (status > 0) { v += 1; mpz_class modulo = (a * ten + b) % p; if (modulo != 0) { cout << "ERROR: " << a << " * 10^" << d << " + " << b << " % " << p << " == " << modulo << endl; } } else if (status < 0) { n += 1; } } } verified += v; negative += n; } printf("Verified %ld entries, skipped %ld negative entries\n", verified.load(), negative.load()); } string FileName(string ext) { return to_string(START_DIGIT) + "_" + to_string(MAX_DIGITS) + "." + ext; } void SaveFilter() { auto file_name = FileName("filter"); cout << "\tSaving to: " << file_name << endl; ofstream fs(file_name, ios::out | ios::trunc); // TODO: Record what prime divided filtered items. 
// TODO: same format as tester.cpp int count = 0; for (int d = START_DIGIT; d <= MAX_DIGITS; d++) { for (long a = 1; a <= 9; a++) { for (long b = 1; b <= 9; b += 2) { long status = is_prime[d][a][b]; // These get "loaded" with FilterTrivial if ((status >= 2 && status <= 7)) { continue; } if (status != 0) { fs << d << "," << a << "," << b << ":" << status << endl; count += 1; } } } } fs.close(); cout << endl; } void LoadPartial(string ext) { auto file_name = FileName(ext); cout << "\t\tReading from: " << file_name << endl; FILE *fs = fopen(file_name.c_str(), "r"); if (!fs) { cout << "Didn't find \"" << file_name << "\"" << endl; return; } int testD, testA, testB; long testResult; while (4 == fscanf(fs, "%d,%d,%d:%ld", &testD, &testA, &testB, &testResult)) { assert(testResult != 0); assert(testD >= START_DIGIT && testD <= MAX_DIGITS); long cur = is_prime[testD][testA][testB]; if (cur != 0 && cur != testResult) { cout << "WHAT: " << testD << "," << testA << "," << testB << ": " << cur << " != " << testResult << endl; assert(false); } is_prime[testD][testA][testB] = testResult; } fclose(fs); }
/* ==================== dynamic_fmt.c ==================== */
/* * This software was written by Jim Fougeron jfoug AT cox dot net * in 2009-2013. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2009-2013 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. * * Generic 'scriptable' hash cracker for JtR * * Renamed and changed from md5_gen* to dynamic*. We handle MD5 and SHA1 * at the present time. More crypt types 'may' be added later. * Added SHA2 (SHA224, SHA256, SHA384, SHA512), GOST, Whirlpool crypt types. * Whirlpool use oSSSL if OPENSSL_VERSION_NUMBER >= 0x10000000, otherwise use sph_* code. * * There used to be a todo list, and other commenting here. It has been * moved to ./docs/dynamic_history.txt * * KNOWN issues, and things to do. * * 1. create a new optimize flag, MGF_PASS_AFTER_FIXEDSALT and * MGF_PASS_BEFORE_FIXEDSALT. Then create DynamicFunc__appendsalt_after_pass[12] * These would only be valid for a FIXED length salted format. Then * we can write the pass right into the buffer, and get_key() would read * it back from there, either skipping over the salt, or removing the salt * from the end. This would allow crypt($s.$p) and crypt($p.s) to be optimized * in the way of string loading, and many fewer buffer copies. So dyna_1 could * be optimized to something like: // dynamic_1 Joomla md5($p.$s) static DYNAMIC_primitive_funcp _Funcs_1[] = { //Flags=MGF_PASS_BEFORE_FIXEDSALT | MGF_SALTED // saltlen=3 (or whatever). This fixed size is 'key' DynamicFunc__appendsalt_after_pass1, DynamicFunc__crypt_md5, NULL }; * WELL, the fixed size salt, it 'may' not be key for the MGF_PASS_BEFORE_FIXEDSALT, * I think I can make that 'work' for variable sized salts. 
But for the * MGF_PASS_AFTER_FIXEDSALT, i.e. crypt($s.$p) the fixed size salt IS key. I would * like to store all PW's at salt_len offset in the buffer, and simply overwrite the * first part of each buffer with the salt, never moving the password after the first * time it is written. THEN it is very important this ONLY be allowed when we KNOW * the salt length ahead of time. * * 2. Change regen-salts to be generic. Add the logic to dynamic_fmt.c proper, and change * the fake-salts.c, and options so that 'generic' regen-salts can be done. */ #include <string.h> #include <time.h> #if AC_BUILT #include "autoconfig.h" #endif #include "arch.h" #if defined(SIMD_COEF_32) && !ARCH_LITTLE_ENDIAN #undef SIMD_COEF_32 #undef SIMD_COEF_64 #undef SIMD_PARA_MD5 #undef SIMD_PARA_MD4 #undef SIMD_PARA_SHA1 #undef SIMD_PARA_SHA256 #undef SIMD_PARA_SHA512 #define BITS ARCH_BITS_STR #endif #if !FAST_FORMATS_OMP #ifdef _OPENMP #define FORCE_THREAD_MD5_body #endif #undef _OPENMP #endif #ifndef DYNAMIC_DISABLED #ifdef SIMD_COEF_32 #include "simd-intrinsics.h" #endif #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "md5.h" #include "md4.h" #include "dynamic.h" #include "options.h" #include "config.h" #include "sha.h" #include "sha2.h" #include "gost.h" #include "sph_haval.h" #include "sph_ripemd.h" #include "sph_tiger.h" #include "sph_md2.h" #include "sph_panama.h" #include "sph_skein.h" #include "sph_whirlpool.h" #include "memory.h" #include "unicode.h" #include "johnswap.h" #include "crc32.h" #include "aligned.h" #include "fake_salts.h" #include "base64_convert.h" #if (AC_BUILT && HAVE_WHIRLPOOL) || \ (!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL) #include <openssl/whrlpool.h> #else // on my 32 bit cygwin builds, this code is about 4x slower than the oSSL code. 
#define WHIRLPOOL_CTX sph_whirlpool_context #define WHIRLPOOL_Init(a) sph_whirlpool_init(a) #define WHIRLPOOL_Update(a,b,c) sph_whirlpool(a,b,c) #define WHIRLPOOL_Final(a,b) sph_whirlpool_close(b,a) #endif #include "KeccakHash.h" #define KECCAK_CTX Keccak_HashInstance #define KECCAK_Update(a,b,c) Keccak_HashUpdate(a,b,(c)*8) #define KECCAK_Final(a,b) Keccak_HashFinal(b,a) #define KECCAK_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x01) #define KECCAK_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x01) // FIPS202 complient #define SHA3_224_Init(hash) Keccak_HashInitialize(hash, 1152, 448, 224, 0x06) #define SHA3_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x06) #define SHA3_384_Init(hash) Keccak_HashInitialize(hash, 832, 768, 384, 0x06) #define SHA3_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x06) #ifdef _OPENMP #include <omp.h> static unsigned int m_ompt; #endif #include "dynamic_types.h" #include "memdbg.h" #if (defined (_OPENMP)||defined(FORCE_THREAD_MD5_body)) && defined (_MSC_VER) unsigned DES_bs_max_kpc, DES_bs_min_kpc, DES_bs_all_p; #undef MD5_body extern void MD5_body(MD5_word x[15],MD5_word out[4]); #endif #define STRINGIZE2(s) #s #define STRINGIZE(s) STRINGIZE2(s) static struct fmt_main fmt_Dynamic; static struct fmt_main *pFmts; static int nFmts; static int nLocalFmts; static struct fmt_main *pLocalFmts; static int force_md5_ctx; static void dynamic_RESET(struct fmt_main *fmt); #define eLargeOut dyna_eLargeOut eLargeOut_t *eLargeOut; #define nLargeOff dyna_nLargeOff unsigned *nLargeOff; #if ARCH_LITTLE_ENDIAN #define MD5_swap(x, y, count) #define MD5_swap2(a,b,c,d,e) #else extern char *MD5_DumpHexStr(void *p); static void MD5_swap(MD5_word *x, MD5_word *y, int count) { do { *y++ = JOHNSWAP(*x++); } while (--count); } #if MD5_X2 static void MD5_swap2(MD5_word *x, MD5_word *x2, MD5_word *y, MD5_word *y2, int count) { do { *y++ = JOHNSWAP(*x++); *y2++ = JOHNSWAP(*x2++); } while (--count); } #endif 
#endif #define FORMAT_LABEL "dynamic" #define FORMAT_NAME "Generic MD5" #ifdef SIMD_COEF_32 #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + ((i)&3) ) #define SHAGETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) ) //for endianity conversion #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define CIPHERTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_SIZE_SHA 20 #define BINARY_ALIGN MEM_ALIGN_WORD // Computation for 'salt_size' The salt (and salt2) is appended to the end of the hash entry. // The format of a salted entry is: $dynamic_#$hash$SALT_VAL[$$2SALT2_VAL] // salt 64 bytes, // salt2 64 bytes, // salt signature $ 1 byte // salt2 signature $$2 3 bytes // null termination 1 byte. This this allows 2 64 byte salt's. // Note, we now have up to 10 of these. #define SALT_SIZE (64*4+1+3+1) #define SALT_ALIGN MEM_ALIGN_WORD // slots to do 24 'tests'. Note, we copy the // same 3 tests over and over again. Simply to validate that // tests use 'multiple' blocks. static struct fmt_tests dynamic_tests[] = { {NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}, {NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}, {NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL} }; #ifdef SIMD_COEF_32 // SSE2 works only with 54 byte keys. Thus, md5(md5($p).md5($s)) can NOT be used // with the SSE2, since that final md5 will be over a 64 byte block of data. 
static union SIMD_inpup { uint32_t w[(64*SIMD_COEF_32)/sizeof(uint32_t)]; unsigned char c[64*SIMD_COEF_32]; } *input_buf, *input_buf2; static union SIMD_crypt { uint32_t w[(BINARY_SIZE*SIMD_COEF_32)/sizeof(uint32_t)]; unsigned char c[BINARY_SIZE*SIMD_COEF_32]; } *crypt_key, *crypt_key2; static unsigned int (*total_len)[SIMD_COEF_32]; static unsigned int (*total_len2)[SIMD_COEF_32]; #define MMX_INP_BUF_SZ (sizeof(input_buf[0]) *BLOCK_LOOPS) #define MMX_INP_BUF2_SZ (sizeof(input_buf2[0])*BLOCK_LOOPS) #define MMX_TOT_LEN_SZ (sizeof(*total_len) *BLOCK_LOOPS) #define MMX_TOT_LEN2_SZ (sizeof(*total_len2)*BLOCK_LOOPS) #define MMX_INP_BUF_SZ (sizeof(input_buf[0]) *BLOCK_LOOPS) #define MMX_CRYPT_KEY_SZ (sizeof(crypt_key[0]) *BLOCK_LOOPS+sizeof(crypt_key[0])) #define MMX_CRYPT_KEY2_SZ (sizeof(crypt_key2[0])*BLOCK_LOOPS) #endif #define FLAT_INP_BUF_SZ (sizeof(MD5_IN)*(MAX_KEYS_PER_CRYPT_X86>>MD5_X2)) #define FLAT_TOT_LEN_SZ (sizeof(unsigned int)*(MAX_KEYS_PER_CRYPT_X86)) MD5_OUT *crypt_key_X86; MD5_OUT *crypt_key2_X86; MD5_IN *input_buf_X86; MD5_IN *input_buf2_X86; unsigned int *total_len_X86; unsigned int *total_len2_X86; BIG_HASH_OUT dynamic_BHO[4]; static int keys_dirty; // We store the salt here static unsigned char *cursalt; // length of salt (so we don't have to call strlen() all the time. static int saltlen; int get_dynamic_fmt_saltlen() { return saltlen; } // This array is for the 2nd salt in the hash. I know of no hashes with double salts, // but test type dynamic_16 (which is 'fake') has 2 salts, and this is the data/code to // handle double salts. 
static unsigned char *cursalt2; static int saltlen2; static unsigned char *username; static int usernamelen; static unsigned char *flds[10]; static int fld_lens[10]; const char *dynamic_itoa16 = itoa16; #if !defined (_DEBUG) #define itoa16_w2 __Dynamic_itoa_w2 #define itoa16_w2_u __Dynamic_itoa_w2_u #define itoa16_w2_l __Dynamic_itoa_w2_l #endif unsigned short itoa16_w2_u[256], itoa16_w2_l[256]; unsigned short *itoa16_w2=itoa16_w2_l; // array of the keys. Also lengths of the keys. NOTE if store_keys_in_input, then the // key array will NOT be used (but the length array still is). #ifndef MAX_KEYS_PER_CRYPT #define MAX_KEYS_PER_CRYPT MAX_KEYS_PER_CRYPT_X86 #endif #ifndef PLAINTEXT_LENGTH #define PLAINTEXT_LENGTH PLAINTEXT_LENGTH_X86 #endif #define EFFECTIVE_MKPC (MAX_KEYS_PER_CRYPT > MAX_KEYS_PER_CRYPT_X86 ? MAX_KEYS_PER_CRYPT : MAX_KEYS_PER_CRYPT_X86) #define EFFECTIVE_MAX_LENGTH (PLAINTEXT_LENGTH > PLAINTEXT_LENGTH_X86 ? PLAINTEXT_LENGTH : PLAINTEXT_LENGTH_X86) // Used to compute length of each string to clean. This is needed, since we have to clean a little more than // just the length, IF we are cleaning strings that are in different endianity than native for the CPU. // This is seen on SHA224 (etc) on Intel, or MD5 of BE systems. We still try to clean 'only' as much as // we need to, but that is usually MORE than what the length of the stored string is. 8 gives us 7 byte spill // over, plus 1 byte for the 0x80 #define COMPUTE_EX_LEN(a) ( (a) > (sizeof(input_buf_X86[0].x1.b)-8) ) ? sizeof(input_buf_X86[0].x1.b) : ((a)+8) // this new 'ENCODED_EFFECTIVE_MAX_LENGTH' needed, since we grab up to 125 bytes of data WHEN in -encode:utf8 mode for a unicode format. #define ENCODED_EFFECTIVE_MAX_LENGTH (EFFECTIVE_MAX_LENGTH > 125 ? EFFECTIVE_MAX_LENGTH : 125) static char saved_key[EFFECTIVE_MKPC][ENCODED_EFFECTIVE_MAX_LENGTH + 1]; static int saved_key_len[EFFECTIVE_MKPC]; // this is the max generic location we should target. 
This keeps us from having blown MD buffers or overwrite // when in utf8->utf16 mode, where we are handling data that likely is larger than we should handle. We have to // handle this larger data, so that we get as many strings with 1 byte utf8 that would convert to data that would // blow our buffers. But we want as many as possible for the 2 and 3 byte utf data. #define MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE (256-17) // Used in 'get_key' if we are running in store_keys_in_input mode static char out[ENCODED_EFFECTIVE_MAX_LENGTH + 1]; // This is the GLOBAL count of keys. ALL of the primitives which deal with a count // will read from this variable. #if !defined (_DEBUG) #define m_count m_Dynamic_Count #endif unsigned int m_count; // If we are run in 'specific' mode (say, -format=dynamic -subformat=dynamic_0, then we // want to 'allow' bare hashes to be 'valid'. This is how we will do this. We have a boolean // that if set to true, we will perform a 1 time check within the valid function. If at // that time we find out that we are cracking (or showing, etc) that we will accept lines // that are either format of $dynamic_0$hhhhhh...32 or simply in the format of hhhhhhh..32 int dynamic_allow_rawhash_fixup = 0; // this one IS in the private_dat, but since it is accessed SO much, we pull it // out prior to 'internal' processing. The others are accessed right from // the structure, since there are accessed infrequently enough to not matter. static int dynamic_use_sse; // If set to 1, then do unicode conversion is many string setting functions. static int *md5_unicode_convert; #if !defined (_DEBUG) #define curdat Dynamic_curdat #endif private_subformat_data curdat; // Helper function that loads out 256 unsigned short array that does base-16 conversions // This function is called at the 'validation' call that loads our preloads (i.e. only // called one time, pre 'run' (but will be called multiple times when benchmarking, but // will NOT impact benchmark times.) 
// Loading a word at a time (2 bytes), sped up
// the overall run time of dynamic_2 almost 5%, thus this conversion is MUCH faster than
// the fastest byte by byte I could put together.  I tested several ways to access this
// array of unsigned shorts, and the best way was a 2 step method into an array of long
// integer pointers (thus, load 1/2 the 32 bit word, then the other 1/2, into a 32 bit word).

/*********************************************************************************
 *********************************************************************************
 * Start of the 'normal' *_fmt code for md5-gen
 *********************************************************************************
 *********************************************************************************/

/*
 * Decodes every $HEX$<hexdigits> segment of 'input' into raw bytes, writing the
 * result to 'output' and returning 'output'.  The leading '$' of each $HEX$
 * signature is kept in the output; the "HEX$" marker and the hex digits are
 * replaced by the decoded bytes.  On anything it cannot safely decode — an
 * encoded NUL byte ("00", which cannot live in a C string), or a non-hex,
 * non-'$' character where hex digits were expected — it copies 'input' to
 * 'output' unchanged and returns that.
 * NOTE(review): callers are expected to have checked strstr(input, "$HEX$")
 * first (see the comment in the !cpH branch), and 'output' must be at least as
 * large as 'input' — decoding only ever shrinks the string.
 */
char *RemoveHEX(char *output, char *input) {
	char *cpi = input;
	char *cpo = output;
	char *cpH = strstr(input, "$HEX$");

	if (!cpH) {
		// should never get here, we have a check performed before this function is called.
		strcpy(output, input);
		return output;
	}

	// Copy everything before the first "$HEX$" verbatim.
	while (cpi < cpH)
		*cpo++ = *cpi++;

	// Keep the signature's leading '$', then skip past "$HEX$".
	*cpo++ = *cpi;
	cpi += 5;

	while (*cpi) {
		if (*cpi == '0' && cpi[1] == '0') {
			// Encoded NUL byte: cannot represent in a C string; bail out
			// and return the input untouched (later code handles this case).
			strcpy(output, input);
			return output;
		}
		if (atoi16[ARCH_INDEX(*cpi)] != 0x7f && atoi16[ARCH_INDEX(cpi[1])] != 0x7f) {
			// Two hex digits -> one raw byte.
			*cpo++ = atoi16[ARCH_INDEX(*cpi)]*16 + atoi16[ARCH_INDEX(cpi[1])];
			cpi += 2;
		} else if (*cpi == '$') {
			// A '$' ends the hex run: copy literal text up to the next
			// "$HEX$" (e.g. "$$U...", "$$F..." fields), then resume decoding
			// if another "$HEX$" signature follows.
			while (*cpi && strncmp(cpi, "$HEX$", 5)) {
				*cpo++ = *cpi++;
			}
			if (!strncmp(cpi, "$HEX$", 5)) {
				*cpo++ = *cpi;
				cpi += 5;
			}
		} else {
			// Malformed hex data: give up, return input unchanged.
			strcpy(output, input);
			return output;
		}
	}
	*cpo = 0;
	return output;
}

/*********************************************************************************
 * Detects a 'valid' md5-gen format. This function is NOT locked to anything. It
 * takes its detection logic from the provided fmt_main pointer. Within there,
 * is a 'private' data pointer. When john first loads the md5-gen, it calls a
 * function which builds proper 'private' data for EACH type of md5-gen.
 * Then
 * john will call valid on EACH of those formats, asking each one if a string is
 * valid. Each format has a 'private' properly setup data object.
 *
 * Returns 1 if 'ciphertext' matches the subformat described by pFmt's private
 * data (signature, hash length/charset, salt presence/length, and required
 * $$2 / $$U / $$Fn fields), 0 otherwise.
 *********************************************************************************/
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
	unsigned int i, cipherTextLen;
	char *cp, fixed_ciphertext[1024];
	private_subformat_data *pPriv = pFmt->private.data;

	if (!pPriv)
		return 0;

	// Must start with this subformat's "$dynamic_#$" signature.
	if (strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG)))
		return 0;

	/* Quick cancel of huge lines (eg. zip archives) */
	if (strnlen(ciphertext, LINE_BUFFER_SIZE + 1) > LINE_BUFFER_SIZE)
		return 0;

	// this is now simply REMOVED totally, if we detect it.  Doing this solves MANY other problems
	// of leaving it in there. The ONLY problem we still have is NULL bytes.
	// (RemoveHEX leaves the string untouched if it contains an encoded NUL.)
	if (strstr(ciphertext, "$HEX$")) {
		if (strnlen(ciphertext, sizeof(fixed_ciphertext) + 1) < sizeof(fixed_ciphertext))
			ciphertext = RemoveHEX(fixed_ciphertext, ciphertext);
	}

	// cp now points just past the "$dynamic_#$" signature, at the hash body.
	cp = &ciphertext[strlen(pPriv->dynamic_WHICH_TYPE_SIG)];

	// Base-64 style hash bodies (crypt or MIME alphabet).
	if (pPriv->dynamic_base64_inout == 1 || pPriv->dynamic_base64_inout == 3 || pPriv->dynamic_base64_inout == 5)
	{
		// jgypwqm.JsMssPLiS8YQ00$BaaaaaSX
		unsigned int len;
		len = base64_valid_length(cp, pPriv->dynamic_base64_inout==3?e_b64_mime:e_b64_crypt, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		if (len < 20 || len > pPriv->dynamic_SALT_OFFSET+4)
			return 0;
		if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
			return !cp[len];  // unsalted: nothing may follow the hash
		if (pPriv->dynamic_FIXED_SALT_SIZE && cp[len] != '$')
			return 0;
		// Positive size: exact salt length; size < -1: maximum salt length.
		if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[len+1]) != pPriv->dynamic_FIXED_SALT_SIZE)
			return 0;
		else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[len+1]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
			return 0;
		return 1;
	}
	// 16-character base-64 body (e.g. phpass-style).
	if (pPriv->dynamic_base64_inout == 2)
	{
		// h3mJrcH0901pqX/m$alex
		unsigned int i;
		for (i = 0; i < 16; ++i) {
			if (atoi64[ARCH_INDEX(cp[i])] == 0x7F)
				return 0;
		}
		if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
			return !cp[i];
		if (pPriv->dynamic_FIXED_SALT_SIZE && cp[16] != '$')
			return 0;
		if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[17]) != pPriv->dynamic_FIXED_SALT_SIZE)
			return 0;
		else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[17]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
			return 0;
		if (strlen(cp) < 16)
			return 0;
		return 1;
	}

	// Hex hash body: length depends on the digest the subformat uses
	// (32 = MD5/MD4, 40 = SHA-1, 64 = SHA-256, 128 = SHA-512, etc).
	if (strlen(cp) < 32)
		return 0;
	cipherTextLen = CIPHERTEXT_LENGTH;
	if (pPriv->dynamic_40_byte_input) {
		cipherTextLen = 40;
	} else if (pPriv->dynamic_48_byte_input) {
		cipherTextLen = 48;
	} else if (pPriv->dynamic_64_byte_input) {
		cipherTextLen = 64;
	} else if (pPriv->dynamic_56_byte_input) {
		cipherTextLen = 56;
	} else if (pPriv->dynamic_80_byte_input) {
		cipherTextLen = 80;
	} else if (pPriv->dynamic_96_byte_input) {
		cipherTextLen = 96;
	} else if (pPriv->dynamic_128_byte_input) {
		cipherTextLen = 128;
	}
	for (i = 0; i < cipherTextLen; i++) {
		if (atoi16[ARCH_INDEX(cp[i])] == 0x7f)
			return 0;
	}
	if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
		if (!cp[cipherTextLen])
			return 1;
		return 0;
	}

	// Salted formats: hash must be followed by '$' + salt data.
	if (cp[cipherTextLen] && cp[cipherTextLen] != '$')
		return 0;
	// NOTE if looking at this in the future, this was not my fix.
	if (strlen(&cp[cipherTextLen]) > SALT_SIZE)
		return 0;
	// end NOTE.
	if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && ciphertext[pPriv->dynamic_SALT_OFFSET-1] != '$')
		return 0;
	if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) != pPriv->dynamic_FIXED_SALT_SIZE) {
		// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
		// validate length with this in mind.
		if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
			int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
			len = (len-4)>>1;
			if (len != pPriv->dynamic_FIXED_SALT_SIZE)
				return 0;
		} else {
			// check if there is a 'salt-2' or 'username', etc  If that is the case, then this is still valid.
			if (strncmp(&ciphertext[pPriv->dynamic_SALT_OFFSET+pPriv->dynamic_FIXED_SALT_SIZE], "$$", 2))
				return 0;
		}
	}
	else if (!regen_salts_options && pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) > -(pPriv->dynamic_FIXED_SALT_SIZE)) {
		char *cpX;
		// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
		// validate length with this in mind.
		if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
			int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
			len = (len-4)>>1;
			if (len > -(pPriv->dynamic_FIXED_SALT_SIZE))
				return 0;
		} else {
			// check if there is a 'salt-2' or 'username', etc  If that is the case, then this is still 'valid'
			cpX = mem_alloc(-(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
			strnzcpy(cpX, &ciphertext[pPriv->dynamic_SALT_OFFSET], -(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
			if (!strstr(cpX, "$$")) {
				MEM_FREE(cpX);
				return 0;
			}
			MEM_FREE(cpX);
		}
	}
	// Required auxiliary fields: second salt ($$2), user name ($$U),
	// and any $$F<n> fields demanded by the subformat's field mask.
	if (pPriv->b2Salts==1 && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$2"))
		return 0;
	if (pPriv->nUserName && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$U"))
		return 0;
	if (pPriv->FldMask) {
		for (i = 0; i < 10; ++i) {
			if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
				char Fld[8];
				sprintf(Fld, "$$F%d", i);
				if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
					return 0;
			}
		}
	}
	return 1;
}

static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv);
static struct fmt_main *dynamic_Get_fmt_main(int which);
static char *HandleCase(char *cp, int caseType);

// 'wrapper' functions. These are here, so we can call these functions to work on ALL data (not simply within the
// thead, which ONLY wants to work on a subset of the data.  These functions should NOT be called by threading
// code, EVER.  But this functions KNOW what to do.  Some actually have threads, others do not need them.
#ifdef _OPENMP #ifndef SIMD_COEF_32 const unsigned int OMP_INC = (MD5_X2+1); const unsigned int OMP_MD5_INC = (MD5_X2+1); const unsigned int OMP_MD4_INC = (MD5_X2+1); const unsigned int OMP_SHA1_INC = (MD5_X2+1); #else const unsigned int OMP_INC = (MD5_X2+1); const unsigned int OMP_MD5_INC = (SIMD_PARA_MD5*SIMD_COEF_32); const unsigned int OMP_MD4_INC = (SIMD_PARA_MD4*SIMD_COEF_32); const unsigned int OMP_SHA1_INC = (SIMD_PARA_SHA1*SIMD_COEF_32); #endif // SIMD_COEF_32 #endif // _OPENMP inline static void __nonMP_DynamicFunc__SSEtoX86_switch_output2() { #ifdef _OPENMP DynamicFunc__SSEtoX86_switch_output2(0,m_count,0); #else DynamicFunc__SSEtoX86_switch_output2(); #endif } inline static void __nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16() { #ifdef _OPENMP DynamicFunc__append_from_last_output2_to_input1_as_base16(0,m_count,0); #else DynamicFunc__append_from_last_output2_to_input1_as_base16(); #endif } void __nonMP_eLargeOut(eLargeOut_t what) { #ifdef _OPENMP unsigned int i; for (i = 1; i < m_ompt; ++i) eLargeOut[i] = what; #endif eLargeOut[0] = what; } void __nonMP_nLargeOff(unsigned val) { #ifdef _OPENMP unsigned int i; for (i = 1; i < m_ompt; ++i) nLargeOff[i] = val; #endif nLargeOff[0] = val; } inline static void md5_unicode_convert_set(int what, int tid) { md5_unicode_convert[tid] = what; } inline static int md5_unicode_convert_get(int tid) { return md5_unicode_convert[tid]; } void __nonMP_md5_unicode_convert(int what) { #ifdef _OPENMP unsigned int i; for (i = 1; i < m_ompt; ++i) md5_unicode_convert[i] = what; #endif md5_unicode_convert[0] = what; } #if !defined (_OPENMP) #define md5_unicode_convert_set(what, tid) md5_unicode_convert_set(what, 0) #define md5_unicode_convert_get(tid) md5_unicode_convert_get(0) #define eLargeOut_set(what, tid) eLargeOut_set(what, 0) #define eLargeOut_get(tid) eLargeOut_get(0) #define nLargeOff_set(val, tid) nLargeOff_set(val, 0) #define nLargeOff_get(tid) nLargeOff_get(0) #endif inline static void 
__nonMP_DynamicFunc__append_keys2() { #ifdef _OPENMP DynamicFunc__append_keys2(0,m_count,0); #else DynamicFunc__append_keys2(); #endif } static void __possMP_DynamicFunc__crypt2_md5() { #ifdef _OPENMP int i; unsigned int inc = OMP_MD5_INC; // if (dynamic_use_sse!=1) // inc = OMP_INC; #pragma omp parallel for for (i = 0; i < m_count; i += inc) DynamicFunc__crypt2_md5(i,i+inc,omp_get_thread_num()); #else DynamicFunc__crypt2_md5(); #endif } static void __nonMP_DynamicFunc__clean_input() { unsigned int i=0; #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { memset(input_buf, 0, MMX_INP_BUF_SZ); memset(total_len, 0, MMX_TOT_LEN_SZ); return; } #endif for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) { //if (total_len_X86[i]) { #if MD5_X2 if (i&1) memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i])); else #endif memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i])); total_len_X86[i] = 0; //} } return; } static void __nonMP_DynamicFunc__clean_input2() { unsigned int i=0; #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { memset(input_buf2, 0, MMX_INP_BUF2_SZ); memset(total_len2, 0, MMX_TOT_LEN2_SZ); return; } #endif if (curdat.using_flat_buffers_sse2_ok) { memset(total_len2_X86, 0, sizeof(total_len2_X86[0])*MAX_KEYS_PER_CRYPT_X86); return; } for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) { //if (total_len2_X86[i]) { #if MD5_X2 if (i&1) memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i])); else #endif memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i])); total_len2_X86[i] = 0; //} } return; } static void __nonMP_DynamicFunc__clean_input_full() { #ifdef SIMD_COEF_32 memset(input_buf, 0, MMX_INP_BUF_SZ); memset(total_len, 0, MMX_TOT_LEN_SZ); #endif memset(input_buf_X86, 0, FLAT_INP_BUF_SZ); memset(total_len_X86, 0, FLAT_TOT_LEN_SZ); } static void __nonMP_DynamicFunc__clean_input2_full() { #ifdef SIMD_COEF_32 memset(input_buf2, 0, MMX_INP_BUF2_SZ); memset(total_len2, 0, MMX_TOT_LEN2_SZ); #endif 
memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ); memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ); } static void __nonMP_DynamicFunc__clean_input_kwik() { #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { memset(total_len, 0, MMX_TOT_LEN_SZ); return; } #endif memset(total_len_X86, 0, FLAT_TOT_LEN_SZ); #if !ARCH_LITTLE_ENDIAN memset(input_buf_X86, 0, FLAT_INP_BUF_SZ); #endif } #ifndef _OPENMP static void __nonMP_DynamicFunc__clean_input2_kwik() { #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { memset(total_len2, 0, MMX_TOT_LEN2_SZ); return; } #endif memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ); #if !ARCH_LITTLE_ENDIAN memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ); #endif } #endif /********************************************************************************* * init() here does nothing. NOTE many formats LINKING into us will have a valid * that DOES do something, but ours does nothing. *********************************************************************************/ static void init(struct fmt_main *pFmt) { private_subformat_data *pPriv = pFmt->private.data; unsigned int i; //fprintf(stderr, "init(%s)\n", pPriv->dynamic_WHICH_TYPE_SIG); /* first off, SAVE the original format structure (owned by JtR). 
We may need this later */ pPriv->pFmtMain = pFmt; #ifdef _OPENMP m_ompt = omp_get_max_threads(); if (!md5_unicode_convert) { md5_unicode_convert = (int*)mem_calloc(m_ompt, sizeof(int)); eLargeOut = (eLargeOut_t*)mem_calloc(m_ompt, sizeof(eLargeOut_t)); nLargeOff = (unsigned*)mem_calloc(m_ompt, sizeof(unsigned)); for (i = 0; i < m_ompt; ++i) { eLargeOut[i] = eBase16; nLargeOff[i] = 0; } } #else if (!md5_unicode_convert) { md5_unicode_convert = (int*)mem_calloc(1, sizeof(int)); eLargeOut = (eLargeOut_t*)mem_calloc(1, sizeof(eLargeOut_t)); eLargeOut[0] = eBase16; nLargeOff = (unsigned*)mem_calloc(1, sizeof(unsigned)); nLargeOff[0] = 0; } #endif #ifdef SIMD_COEF_32 if (!input_buf) { input_buf = mem_calloc_align(1, MMX_INP_BUF_SZ, MEM_ALIGN_SIMD); total_len = mem_calloc_align(1, MMX_TOT_LEN_SZ, MEM_ALIGN_SIMD); total_len2 = mem_calloc_align(1, MMX_TOT_LEN2_SZ, MEM_ALIGN_SIMD); input_buf2 = mem_calloc_align(1, MMX_INP_BUF2_SZ, MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(1, MMX_CRYPT_KEY_SZ, MEM_ALIGN_SIMD); crypt_key2 = mem_calloc_align(1, MMX_CRYPT_KEY2_SZ, MEM_ALIGN_SIMD); } #endif if (!crypt_key_X86) { crypt_key_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key_X86)); crypt_key2_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key2_X86)); input_buf_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf_X86)); input_buf2_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf2_X86)); total_len_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len_X86)); total_len2_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len2_X86)); } for (i = 0; i < 4; ++i) dynamic_BHO[i].dat = mem_calloc_align(BLOCK_LOOPS, sizeof(*(dynamic_BHO[0].dat)), MEM_ALIGN_SIMD); gost_init_table(); if (!pPriv || (pPriv->init == 1 && !strcmp(curdat.dynamic_WHICH_TYPE_SIG, pPriv->dynamic_WHICH_TYPE_SIG))) return; 
__nonMP_DynamicFunc__clean_input_full(); __nonMP_DynamicFunc__clean_input2_full(); // Some builds (omp vs non omp, etc) do not call these functions, so to avoid 'unused' warnings, we simply // call them here. __nonMP_DynamicFunc__clean_input_kwik(); dynamic_RESET(pFmt); if (!pPriv) return; pPriv->init = 1; memcpy(&curdat, pPriv, sizeof(private_subformat_data)); dynamic_use_sse = curdat.dynamic_use_sse; force_md5_ctx = curdat.force_md5_ctx; fmt_Dynamic.params.max_keys_per_crypt = pFmt->params.max_keys_per_crypt; fmt_Dynamic.params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt; if (pFmt->params.min_keys_per_crypt > 64) pFmt->params.min_keys_per_crypt = 64; fmt_Dynamic.params.flags = pFmt->params.flags; fmt_Dynamic.params.format_name = pFmt->params.format_name; fmt_Dynamic.params.algorithm_name = pFmt->params.algorithm_name; fmt_Dynamic.params.benchmark_comment = pFmt->params.benchmark_comment; fmt_Dynamic.params.benchmark_length = pFmt->params.benchmark_length; // we allow for 3 bytes of utf8 data to make up the number of plaintext_length unicode chars. 
	/* Tail of format initialization: adjust plaintext length for UTF-8
	   (3 bytes per char, capped at 125), then mirror the sub-format's
	   params/methods into the master fmt_Dynamic descriptor. */
	if ( (pFmt->params.flags&FMT_UNICODE) && options.target_enc == UTF_8 ) {
		//printf("Here pFmt->params.plaintext_length=%d pPriv->pSetup->MaxInputLen=%d\n", pFmt->params.plaintext_length, pPriv->pSetup->MaxInputLen);
		pFmt->params.plaintext_length = MIN(125, pFmt->params.plaintext_length * 3);
	}
	else
		fmt_Dynamic.params.plaintext_length = pFmt->params.plaintext_length;
	fmt_Dynamic.params.salt_size = pFmt->params.salt_size;
	fmt_Dynamic.params.flags = pFmt->params.flags;

	fmt_Dynamic.methods.cmp_all = pFmt->methods.cmp_all;
	fmt_Dynamic.methods.cmp_one = pFmt->methods.cmp_one;
	fmt_Dynamic.methods.cmp_exact = pFmt->methods.cmp_exact;
	fmt_Dynamic.methods.set_salt = pFmt->methods.set_salt;
	fmt_Dynamic.methods.salt = pFmt->methods.salt;
	fmt_Dynamic.methods.salt_hash = pFmt->methods.salt_hash;
	fmt_Dynamic.methods.split = pFmt->methods.split;
	fmt_Dynamic.methods.set_key = pFmt->methods.set_key;
	fmt_Dynamic.methods.get_key = pFmt->methods.get_key;
	fmt_Dynamic.methods.clear_keys = pFmt->methods.clear_keys;
	fmt_Dynamic.methods.crypt_all = pFmt->methods.crypt_all;
	for (i = 0; i < PASSWORD_HASH_SIZES; ++i) {
		fmt_Dynamic.methods.binary_hash[i] = pFmt->methods.binary_hash[i];
		fmt_Dynamic.methods.get_hash[i] = pFmt->methods.get_hash[i];
	}

#if !MD5_IMM
	{
		// non-immediate MD5 build needs a runtime table init.
		extern void MD5_std_init(struct fmt_main *pFmt);
		MD5_std_init(pFmt);
	}
#endif

	// For formats whose second input buffer always holds a 32-byte (hex MD5)
	// value, pre-set the lengths and the 0x80 padding / bit-length bytes once,
	// so crypt-time never has to re-pad buffer 2.
	if (curdat.input2_set_len32) {
		for (i = 0; i < MAX_KEYS_PER_CRYPT_X86; ++i)
			total_len2_X86[i] = 32;
#ifdef SIMD_COEF_32
		for (i = 0; i < BLOCK_LOOPS; ++i) {
			unsigned int j;
			for (j = 0; j < SIMD_COEF_32; j++) {
				// 0x80 terminator after the 32 data bytes; 0x1 at position 57
				// looks like the MD5 bit-length high byte (32*8=256=0x100) —
				// NOTE(review): confirm against the SIMD buffer layout.
				input_buf2[i].c[GETPOS(32, j)] = 0x80;
				input_buf2[i].c[GETPOS(57, j)] = 0x1;
				total_len2[i][j] = 0x20;
			}
		}
#endif
	}
}

/* Release all buffers allocated for this format (MEM_FREE is a
   free-and-NULL macro from the project's memory layer). */
static void done(void)
{
	int i;

	MEM_FREE(total_len2_X86);
	MEM_FREE(total_len_X86);
	MEM_FREE(input_buf2_X86);
	MEM_FREE(input_buf_X86);
	MEM_FREE(crypt_key2_X86);
	MEM_FREE(crypt_key_X86);
#ifdef SIMD_COEF_32
	MEM_FREE(crypt_key2);
	MEM_FREE(crypt_key);
	MEM_FREE(input_buf2);
	MEM_FREE(total_len2);
	MEM_FREE(total_len);
	MEM_FREE(input_buf);
#endif
	MEM_FREE(nLargeOff);
	MEM_FREE(eLargeOut);
	MEM_FREE(md5_unicode_convert);
	for (i = 0; i < 4; ++i)
		MEM_FREE(dynamic_BHO[i].dat);
}

/*********************************************************************************
 * This function will add a $dynamic_#$ IF there is not one, and if we have a specific
 * format requested.  Also, it will add things like UserID, Domain, Fld3, Fld4,
 * Fld5, etc.
 *********************************************************************************/
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	private_subformat_data *pPriv = pFmt->private.data;
	char Tmp[80];
	int i;
	int trim_u=0;
	char *cpBuilding=split_fields[1];

	if (!pPriv)
		return split_fields[1];

	// ANY field[1] longer than 490 will simply be ignored, and returned 'as is'.
	// the rest of this function makes this assumption.
	if (!cpBuilding || strnlen(cpBuilding, 491) > 490)
		return cpBuilding;

	// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
	if (pPriv->dynamic_base64_inout == 3 && !strncmp(cpBuilding, "$dynamic_", 9)) {
		static char ct[496];
		int len;
		char *cp = strchr(&cpBuilding[9], '$'), *cp2;

		if (!cp)
			return cpBuilding;
		++cp;
		len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		if (len && cp[len-1] == '=') {
			// copy up to and including the base64 data, then chop '=' padding
			strnzcpy(ct, cpBuilding, cp-cpBuilding+len+1);
			cp2 = &ct[strlen(ct)-1];
			while (*cp2 == '=')
				*cp2-- = 0;
			if (cp[len])
				strcat(cp2, &cp[len]);	// re-append anything after the base64 run
			cpBuilding = ct;
		}
	}

	if (pFmt->params.salt_size && !strchr(split_fields[1], '$')) {
		// salted format but no salt present and nothing to synthesize: leave as-is
		if (!pPriv->nUserName && !pPriv->FldMask && options.regen_lost_salts == 0)
			return split_fields[1];
	}

	// handle 'older' md5_gen(x) signature, by simply converting to $dynamic_x$ signature
	// Thus older md5_gen() is a valid input (or from john.pot), but ONLY the newer
	// $dynamic_x$ will be written out (into .pot, output lines, etc).
	if (!strncmp(cpBuilding, "md5_gen(", 8)) {
		static char ct[496];
		char *cp = &cpBuilding[8], *cpo = &ct[sprintf(ct, "$dynamic_")];

		while (*cp >= '0' && *cp <= '9')
			*cpo++ = *cp++;
		*cpo++ = '$';
		++cp;	// skip the ')'
		strcpy(cpo, cp);
		cpBuilding = ct;
	}
	// At this point, max length of cpBuilding is 491 (if it was a md5_gen signature)

	// allow a raw hash, if there is a $u but no salt
	if (pPriv->nUserName && split_fields[0][0] && !strchr(cpBuilding, '$') && strcmp(split_fields[0], "?")) {
		static char ct[496];
		strcpy(ct, cpBuilding);
		strcat(ct, "$$U");	// temporary marker so FixupIfNeeded sees a user field
		cpBuilding = ct;
		trim_u=1;
	}

	cpBuilding = FixupIfNeeded(cpBuilding, pPriv);
	if (trim_u)
		cpBuilding[strlen(cpBuilding)-3] = 0;	// strip the temporary "$$U"

	// at this point max length is still < 512.  491 + strlen($dynamic_xxxxx$) is 506

	if (strncmp(cpBuilding, "$dynamic_", 9)) {
		// ok, here we add the 'generic' regen salt code
		if (options.regen_lost_salts && !strchr(cpBuilding, '$')) {
			char *cp = load_regen_lost_salt_Prepare(cpBuilding);
			if (cp)
				return cp;
		}
		return split_fields[1];
	}

	if ( (pPriv->pSetup->flags&MGF_SALTED) == 0)
		return cpBuilding;

	/* at this point, we want to convert ANY and all $HEX$hex into values */
	/* the reason we want to do this, is so that things read from john.pot file will be in proper 'native' format */
	/* the ONE exception to this, is if there is a NULL byte in the $HEX$ string, then we MUST leave that $HEX$ string */
	/* alone, and let the later calls in dynamic.c handle them. */
	if (strstr(cpBuilding, "$HEX$")) {
		char *cp, *cpo;
		int bGood=1;	// cleared if an embedded NUL ("00") is found
		static char ct[512];

		strcpy(ct, cpBuilding);
		cp = strstr(ct, "$HEX$");
		cpo = cp;
		*cpo++ = *cp;	// keep the leading '$' in place
		cp += 5;
		while (*cp && bGood) {
			if (*cp == '0' && cp[1] == '0') {
				// embedded NUL: abandon decode, keep $HEX$ form
				bGood = 0;
				break;
			}
			if (atoi16[ARCH_INDEX(*cp)] != 0x7f && atoi16[ARCH_INDEX(cp[1])] != 0x7f) {
				// valid hex pair -> single raw byte
				*cpo++ = atoi16[ARCH_INDEX(*cp)]*16 + atoi16[ARCH_INDEX(cp[1])];
				*cpo = 0;
				cp += 2;
			} else if (*cp == '$') {
				// copy non-hex field text verbatim until the next $HEX$ block
				while (*cp && strncmp(cp, "$HEX$", 5)) {
					*cpo++ = *cp++;
				}
				*cpo = 0;
				if (!strncmp(cp, "$HEX$", 5)) {
					*cpo++ = *cp;
					cp += 5;
				}
			} else {
				// malformed hex: give up and return the original field
				return split_fields[1];
			}
		}
		if (bGood)
			cpBuilding = ct;
		// if we came into $HEX$ removal, then cpBuilding will always be shorter
	}

	// at this point max length is still < 512.  491 + strlen($dynamic_xxxxx$) is 506

	if (pPriv->nUserName && !strstr(cpBuilding, "$$U")) {
		if (split_fields[0] && split_fields[0][0] && strcmp(split_fields[0], "?")) {
			char *userName=split_fields[0], *cp;
			static char ct[1024];

			// assume field[0] is in format: username OR DOMAIN\\username  If we find a \\, then use the username 'following' it.
			cp = strchr(split_fields[0], '\\');
			if (cp)
				userName = &cp[1];

			userName = HandleCase(userName, pPriv->nUserName);
			snprintf(ct, sizeof(ct), "%s$$U%s", cpBuilding, userName);
			cpBuilding = ct;
		}
	}
	if (pPriv->FldMask) {
		// append each requested $$Fn field that is present and not already in the hash
		for (i = 0; i < 10; ++i) {
			if (pPriv->FldMask&(MGF_FLDx_BIT<<i)) {
				sprintf(Tmp, "$$F%d", i);
				if (split_fields[i] && split_fields[i][0] && strcmp(split_fields[i], "/") && !strstr(cpBuilding, Tmp)) {
					static char ct[1024];
					char ct2[1024];
					snprintf(ct2, sizeof(ct2), "%s$$F%d%s", cpBuilding, i, split_fields[i]);
					strcpy(ct, ct2);
					cpBuilding = ct;
				}
			}
		}
	}
	return cpBuilding;
}

/* Canonicalize a ciphertext: normalize mime '=' padding, upgrade md5_gen()
   signatures, decode $HEX$ runs, and prepend this sub-format's signature. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[1024];
	private_subformat_data *pPriv = pFmt->private.data;

	if (strnlen(ciphertext, 951) > 950)
		return ciphertext;

	// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
	if (pPriv->dynamic_base64_inout == 3 && !strncmp(ciphertext, "$dynamic_", 9)) {
		static char ct[496];
		unsigned int len;
		char *cp = strchr(&ciphertext[9], '$'), *cp2;

		if (cp) {
			++cp;
			len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
			if (len && cp[len-1] == '=') {
				strnzcpy(ct, ciphertext, cp-ciphertext+len+1);
				cp2 = &ct[strlen(ct)-1];
				while (*cp2 == '=')
					*cp2-- = 0;
				if (cp[len])
					strcat(cp2, &cp[len]);
				ciphertext = ct;
			}
		}
	}

	if (!strncmp(ciphertext, "$dynamic", 8)) {
		if (strstr(ciphertext, "$HEX$"))
			return RemoveHEX(out, ciphertext);
		return ciphertext;
	}
	if (!strncmp(ciphertext, "md5_gen(", 8)) {
		// skip the old "md5_gen(N)" prefix; the signature is re-added below
		ciphertext += 8;
		do ++ciphertext; while (*ciphertext != ')');
		++ciphertext;
	}
	if (strstr(ciphertext, "$HEX$")) {
		char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
		RemoveHEX(cp, ciphertext);
	} else
		snprintf(out, sizeof(out), "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);

	return out;
}

// This split unifies case.
static char *split_UC(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[1024];
	private_subformat_data *pPriv = pFmt->private.data;

	if (!strncmp(ciphertext, "$dynamic", 8)) {
		if (strstr(ciphertext, "$HEX$"))
			RemoveHEX(out, ciphertext);
		else
			strcpy(out, ciphertext);
	} else {
		if (!strncmp(ciphertext, "md5_gen(", 8)) {
			ciphertext += 8;
			do ++ciphertext; while (*ciphertext != ')');
			++ciphertext;
		}
		if (strstr(ciphertext, "$HEX$")) {
			char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG);
			RemoveHEX(cp, ciphertext);
		} else
			sprintf(out, "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext);
	}
	// lower-case the hash portion (after "$dynamic_N$", up to the next '$')
	ciphertext = strchr(&out[8], '$')+1;
	while (*ciphertext && *ciphertext != '$') {
		if (*ciphertext >= 'A' && *ciphertext <= 'Z')
			*ciphertext += 0x20; // ASCII specific, but I really do not care.
		++ciphertext;
	}
//	printf("%s\n", out);
	return out;
}

/*********************************************************************************
 * Stores the new salt provided into our 'working' salt
 *
 * Salt record layout (as unpacked here): 2 base-8 digit chars of primary salt
 * length, then 4 chars that are either "0000" or a base-8 encoded bitmap of
 * optional extra items (bit0=salt2, bit1=username, bits2..11=fields 0..9),
 * then the salt bytes followed by each flagged item as <len byte><data>.
 *********************************************************************************/
static void set_salt(void *salt)
{
	unsigned char *cpsalt;
	unsigned int todo_bits=0, i, bit;
	if (!salt || curdat.dynamic_FIXED_SALT_SIZE == 0) {
		saltlen = 0;
		return;
	}
#if ARCH_ALLOWS_UNALIGNED
	cpsalt = *((unsigned char**)salt);
#else
	// avoid a potentially misaligned pointer load
	memcpy(((void*)&(cpsalt)), ((unsigned char **)salt), sizeof(void*));
#endif
	saltlen = *cpsalt++ - '0';
	saltlen <<= 3;
	saltlen += *cpsalt++ - '0';
#if ARCH_ALLOWS_UNALIGNED
	if (*((uint32_t*)cpsalt) != 0x30303030)	// "0000" as a little word compare
#else
	if (memcmp(cpsalt, "0000", 4))
#endif
	{
		// this is why we used base-8.  Takes an extra byte, but there is NO conditional
		// logic, building this number, and no multiplication.  We HAVE added one conditional
		// check, to see if we can skip the entire load, if it is 0000.
		todo_bits = *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
	}
	else
		cpsalt += 4;
	cursalt = cpsalt;
	if (!todo_bits) return;
	cpsalt += saltlen;
	if (todo_bits & 1) {
		todo_bits ^= 1; // clear that bit.
		saltlen2 = *cpsalt++;
		cursalt2 = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += saltlen2;
	}
	if (todo_bits & 2) {
		todo_bits ^= 2; // clear that bit.
		usernamelen = *cpsalt++;
		username = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += usernamelen;
	}
	bit = 4;
	for (i = 0; i < 10; ++i, bit<<=1) {
		if (todo_bits & bit) {
			todo_bits ^= bit; // clear that bit.
			fld_lens[i] = *cpsalt++;
			flds[i] = cpsalt;
			if (todo_bits == 0) return;
			cpsalt += fld_lens[i];
		}
	}
}

/*********************************************************************************
 * Sets this key. It will either be dropped DIRECTLY into the input buffer
 * number 1, or put into an array of keys. 
Which one happens depends upon
 * HOW the generic functions were laid out for this type. Not all types can
 * load into the input.  If not they MUST use the key array.  Using the input
 * buffer is faster, when it can be safely done.
 *********************************************************************************/
static void set_key(char *key, int index)
{
	unsigned int len;

	//printf("idx=%d key=%s\n", index, key);

#ifdef SIMD_COEF_32
	// pick the SIMD/x86 mode for this crypt pass (mirrors crypt_all's logic)
	if (curdat.store_keys_in_input==2)
		dynamic_use_sse = 3;
	else if (curdat.md5_startup_in_x86)
		dynamic_use_sse = 2;
	else if (dynamic_use_sse==2)
		dynamic_use_sse = 1;
#endif

	if (curdat.nPassCase>1)
		key = HandleCase(key, curdat.nPassCase);

	// Ok, if the key is in unicode/utf8, we switch it here one time, and are done with it.

	if (curdat.store_keys_in_input) {
#ifdef SIMD_COEF_32
		if (dynamic_use_sse==1) {
			// code derived from rawMD5_fmt_plug.c code from magnum
			// Packs the key word-by-word directly into the interleaved SIMD
			// input buffer, appending the 0x80 terminator in the word that
			// holds the final byte, then zeroes the rest of the lane.
#if ARCH_ALLOWS_UNALIGNED
			const uint32_t *key32 = (uint32_t*)key;
#else
			char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
			const uint32_t *key32 = is_aligned(key, sizeof(uint32_t)) ?
					(uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key);
#endif
			unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
			uint32_t *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
			uint32_t *keybuf_word = keybuffer;
			unsigned int len;
			uint32_t temp;

			len = 0;
			while((temp = *key32++) & 0xff) {
				// find the terminating NUL within this 32-bit word (little-endian)
				if (!(temp & 0xff00)) {
					*keybuf_word = (temp & 0xff) | (0x80 << 8);
					++len;
					goto key_cleaning;
				}
				if (!(temp & 0xff0000)) {
					*keybuf_word = (temp & 0xffff) | (0x80 << 16);
					len+=2;
					goto key_cleaning;
				}
				if (!(temp & 0xff000000)) {
					*keybuf_word = temp | (0x80U << 24);
					len+=3;
					goto key_cleaning;
				}
				*keybuf_word = temp;
				len += 4;
				keybuf_word += SIMD_COEF_32;	// next word of the SAME lane
			}
			*keybuf_word = 0x80;	// key length was a multiple of 4

key_cleaning:
			// zero stale words left over from a previous (longer) key
			keybuf_word += SIMD_COEF_32;
			while(*keybuf_word) {
				*keybuf_word = 0;
				keybuf_word += SIMD_COEF_32;
			}
			// word 14 of an MD5 block holds the message bit length
			keybuffer[14*SIMD_COEF_32] = len << 3;
			return;
		}
#endif
		len = strlen(key);
		if (len > 110) // we never do UTF-8 -> UTF-16 in this mode
			len = 110;
//		if (index==0) {
//			// we 'have' to use full clean here. NOTE 100% sure why, but 10 formats fail if we do not.
//			__nonMP_DynamicFunc__clean_input_full();
//		}
#if MD5_X2
		if (index & 1)
			memcpy(input_buf_X86[index>>MD5_X2].x2.b2, key, len);
		else
#endif
			memcpy(input_buf_X86[index>>MD5_X2].x1.b, key, len);
		saved_key_len[index] = total_len_X86[index] = len;
	} else {
		// key-array mode: just stash the key; crypt_all loads buffers later
		len = strlen(key);
		if (len > 110 && !(fmt_Dynamic.params.flags & FMT_UNICODE))
			len = 110;
//		if (index==0) {
//			__nonMP_DynamicFunc__clean_input_full();
//		}
		keys_dirty = 1;
		memcpy(((char*)(saved_key[index])), key, len);
		saved_key_len[index] = len;
	}
}

/* Clear per-crypt key buffers; how much cleaning is needed depends on where
   keys are stored and whether the format demands a full wipe. */
static void clear_keys(void)
{
#ifdef SIMD_COEF_32
	if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED) {
		__nonMP_DynamicFunc__clean_input_full();
		return;
	}
	if (curdat.store_keys_in_input==1 || curdat.store_keys_in_input==3)
		return;	// set_key() fully overwrites/cleans the lane itself
	if (curdat.md5_startup_in_x86)
		__nonMP_DynamicFunc__clean_input_full();
	// This clean was causing failures (dirty buffers left) for dyna_51, 61 and formspring.
	// once commented out, dyna fully passes. I see no reason to keep this here at all.
//	else
//		__nonMP_DynamicFunc__clean_input_kwik();
#else
	__nonMP_DynamicFunc__clean_input_full();
#endif
}

/*********************************************************************************
 * Returns the key.  NOTE how it gets it depends upon if we are storing
 * into the array of keys (there we simply return it), or if we are
 * loading into input buffer #1.  If in input buffer, we have to re-create
 * the key, prior to returning it.
 *********************************************************************************/
static char *get_key(int index)
{
	// NOTE(review): 'out' is assumed to be a file-scope scratch buffer declared
	// elsewhere in this file -- confirm its size covers the max key length.
	if (curdat.store_keys_in_input) {
		unsigned int i;
		unsigned char *cp;
#ifdef SIMD_COEF_32
		//if (dynamic_use_sse==1) {
		// Note, if we are not in
		if (dynamic_use_sse && !curdat.md5_startup_in_x86) {
			unsigned int s;
			unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
			//if (curdat.store_keys_in_input && dynamic_use_sse==1)

//			s = saved_key_len[index];	// NOTE, we now have to get the length from the buffer, we do NOT store it into a saved_key_len buffer.
			uint32_t *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)];
			s = keybuffer[14*SIMD_COEF_32] >> 3;	// bit length back to bytes
			for (i=0;i<s;i++)
				out[i] = input_buf[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
			out[i] = 0;
			return (char*)out;
		}
#endif
#if MD5_X2
		if (index & 1)
			cp = input_buf_X86[index>>MD5_X2].x2.B2;
		else
#endif
			cp = input_buf_X86[index>>MD5_X2].x1.B;
		for (i=0;i<saved_key_len[index];++i)
			out[i] = cp[i];
		out[i] = 0;
		return (char*)out;
	} else {
		saved_key[index][saved_key_len[index]] = '\0';
		return saved_key[index];
	}
}

/*********************************************************************************
 * Looks for ANY key that was cracked.
 *********************************************************************************/
/* Fast first-pass check: compare only the first 32 bits of the binary against
   every computed hash; cmp_one/cmp_exact confirm any hit. */
static int cmp_all(void *binary, int count)
{
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int j;
	if (dynamic_use_sse&1) {
		unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
		for (i = 0; i < cnt; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j)
				if ( *((uint32_t *)binary) == crypt_key[i].w[j])
					return 1;
		}
		return 0;
	}
#endif
	for (i = 0; i < count; i++) {
#if MD5_X2
		if (i&1) {
			if (!(((uint32_t *)binary)[0] - crypt_key_X86[i>>MD5_X2].x2.w2[0]))
				return 1;
		}
		else
#endif
		if (!(((uint32_t *)binary)[0] - crypt_key_X86[i>>MD5_X2].x1.w[0]))
			return 1;
	}
	return 0;
}

/* Mask selecting the 24 significant bits of each 32-bit word for the
   64x4x6 (4 x 6-byte-ish) hash layout; endian-dependent placement. */
#if ARCH_LITTLE_ENDIAN
#define MASK_4x6 0x00ffffff
#else
#define MASK_4x6 0xffffff00
#endif
/* Same first-32-bit scan as cmp_all, but only 24 of those bits are
   meaningful, so both sides are masked with MASK_4x6. */
static int cmp_all_64_4x6(void *binary, int count)
{
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int j;
	if (dynamic_use_sse==1) {
		unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
		for (i = 0; i < cnt; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j)
				if ( *((uint32_t *)binary) == (crypt_key[i].w[j] & MASK_4x6))
					return 1;
		}
		return 0;
	}
#endif
	for (i = 0; i < count; i++) {
#if MD5_X2
		if (i&1) {
			if (!(((uint32_t *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x2.w2[0]&MASK_4x6)))
				return 1;
		}
		else
#endif
		if (!(((uint32_t *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x1.w[0]&MASK_4x6)))
			return 1;
	}
	return 0;
}

/*********************************************************************************
 * In this code, we always do exact compare, so if this function is called, it
 * simply returns true.
 *********************************************************************************/
static int cmp_exact(char *binary, int index)
{
	return 1;
}

/*********************************************************************************
 * There was 'something' that was possibly hit.  Now john will ask us to check
 * each one of the data items, for an 'exact' match.
 *********************************************************************************/
/* Full 128-bit compare of the binary against the computed hash at 'index'. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		if ( (((uint32_t *)binary)[0] == ((uint32_t *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
		     (((uint32_t *)binary)[1] == ((uint32_t *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
		     (((uint32_t *)binary)[2] == ((uint32_t *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
		     (((uint32_t *)binary)[3] == ((uint32_t *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]))
			return 1;
		return 0;
	}
#endif
#if MD5_X2
	if (index & 1) {
		if ( (((uint32_t *)binary)[0] == crypt_key_X86[index>>MD5_X2].x2.w2[0] ) &&
		     (((uint32_t *)binary)[1] == crypt_key_X86[index>>MD5_X2].x2.w2[1] ) &&
		     (((uint32_t *)binary)[2] == crypt_key_X86[index>>MD5_X2].x2.w2[2] ) &&
		     (((uint32_t *)binary)[3] == crypt_key_X86[index>>MD5_X2].x2.w2[3] ) )
			return 1;
		return 0;
	}
#endif
	if ( (((uint32_t *)binary)[0] == crypt_key_X86[index>>MD5_X2].x1.w[0] ) &&
	     (((uint32_t *)binary)[1] == crypt_key_X86[index>>MD5_X2].x1.w[1] ) &&
	     (((uint32_t *)binary)[2] == crypt_key_X86[index>>MD5_X2].x1.w[2] ) &&
	     (((uint32_t *)binary)[3] == crypt_key_X86[index>>MD5_X2].x1.w[3] ) )
		return 1;
	return 0;
}

/* cmp_one variant for the 64x4x6 layout: each word masked with MASK_4x6. */
static int cmp_one_64_4x6(void *binary, int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		if ( (((uint32_t *)binary)[0] == (((uint32_t *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
		     (((uint32_t *)binary)[1] == (((uint32_t *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
		     (((uint32_t *)binary)[2] == (((uint32_t *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
		     (((uint32_t *)binary)[3] == (((uint32_t *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)))
			return 1;
		return 0;
	}
#endif
#if MD5_X2
	if (index & 1) {
		if ( (((uint32_t*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x2.w2[0] & MASK_4x6)) &&
		     (((uint32_t*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x2.w2[1] & MASK_4x6)) &&
		     (((uint32_t*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x2.w2[2] & MASK_4x6)) &&
		     (((uint32_t*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x2.w2[3] & MASK_4x6)) )
			return 1;
		return 0;
	}
#endif
	if ( (((uint32_t*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x1.w[0] & MASK_4x6)) &&
	     (((uint32_t*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x1.w[1] & MASK_4x6)) &&
	     (((uint32_t*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x1.w[2] & MASK_4x6)) &&
	     (((uint32_t*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x1.w[3] & MASK_4x6)) )
		return 1;
	return 0;
}

/*********************************************************************************
 *********************************************************************************
 *  This is the real 'engine'.  It simply calls functions one
 *  at a time from the array of functions.
 *********************************************************************************
 *********************************************************************************/
static int crypt_all(int *pcount, struct db_salt *salt)
{
	// set m_count.  This is our GLOBAL value, used by ALL of the script functions to know how
	// many keys are loaded, and how much work we do.
	m_count = *pcount;
	__nonMP_eLargeOut(eBase16);
	__nonMP_nLargeOff(0);

#ifdef SIMD_COEF_32
	// If this format is MMX built, but is supposed to start in X86 (but be switchable), then we
	// set that value here.
	if (curdat.store_keys_in_input==2)
		dynamic_use_sse = 3;
	else if (curdat.md5_startup_in_x86)
		dynamic_use_sse = 2;
	else if (dynamic_use_sse==2)
		dynamic_use_sse = 1;
#endif

	__nonMP_md5_unicode_convert(0);

	// choose upper/lower hex conversion tables once per crypt
	if (curdat.dynamic_base16_upcase) {
		dynamic_itoa16 = itoa16u;
		itoa16_w2 = itoa16_w2_u;
	}
	else {
		dynamic_itoa16 = itoa16;
		itoa16_w2 = itoa16_w2_l;
	}

	// There may have to be some 'prelim' work done with the keys.  This is so that if we 'know' that keys were
	// loaded into the keys[] array, but that we should do something like md5 and base-16 put them into an
	// input slot, then we do that FIRST, prior to calling the script functions.  Thus for a format such as
	// md5(md5($p).$s) we could md5 the pass, and base-16 put it into a input buffer.  Then when john sets salt
	// and calls crypt all, the crypt script would simply set the input len to 32, append the salt and call a
	// single crypt.  That eliminates almost 1/2 of the calls to md5_crypt() for the format show in this example.
	if (keys_dirty) {
		if (curdat.store_keys_normal_but_precompute_hash_to_output2) {
			keys_dirty = 0;
			if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED2)
				__nonMP_DynamicFunc__clean_input2_full();
			else
				__nonMP_DynamicFunc__clean_input2();
			if (curdat.store_keys_in_input_unicode_convert)
				__nonMP_md5_unicode_convert(1);
			__nonMP_DynamicFunc__append_keys2();
			__nonMP_md5_unicode_convert(0);

			//if (curdat.using_flat_buffers_sse2_ok) {
			if (curdat.dynamic_use_sse == 0) {
				if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) {
					// dispatch on the precompute hash type; OpenMP builds take
					// the (first,count,thread) triple, non-OMP builds take none
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(); break
#endif
					switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type) {
						CASE(MD5); CASE(MD4); CASE(SHA1); CASE(SHA224); CASE(SHA256); CASE(SHA384);
						CASE(SHA512); CASE(GOST); CASE(WHIRLPOOL); CASE(Tiger); CASE(RIPEMD128);
						CASE(RIPEMD160); CASE(RIPEMD256); CASE(RIPEMD320);
						CASE(HAVAL128_3); CASE(HAVAL128_4); CASE(HAVAL128_5);
						CASE(HAVAL160_3); CASE(HAVAL160_4); CASE(HAVAL160_5);
						CASE(HAVAL192_3); CASE(HAVAL192_4); CASE(HAVAL192_5);
						CASE(HAVAL224_3); CASE(HAVAL224_4); CASE(HAVAL224_5);
						CASE(HAVAL256_3); CASE(HAVAL256_4); CASE(HAVAL256_5);
						CASE(MD2); CASE(PANAMA);
						CASE(SKEIN224); CASE(SKEIN256); CASE(SKEIN384); CASE(SKEIN512);
						CASE(SHA3_224); CASE(SHA3_256); CASE(SHA3_384); CASE(SHA3_512);
						CASE(KECCAK_256); CASE(KECCAK_512);
						// LARGE_HASH_EDIT_POINT
					}
				} else if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) {
					unsigned int i;
					// pre-set input1 lengths to the fixed offset, then APPEND the
					// base-16 hash there instead of overwriting from position 0
					for (i = 0; i < m_count; ++i)
						total_len_X86[i] = curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX;
#undef CASE
#ifdef _OPENMP
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(0,m_count,0); break
#else
#define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(); break
#endif
					switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type) {
						CASE(MD5); CASE(MD4); CASE(SHA1); CASE(SHA224); CASE(SHA256); CASE(SHA384);
						CASE(SHA512); CASE(GOST); CASE(WHIRLPOOL); CASE(Tiger); CASE(RIPEMD128);
						CASE(RIPEMD160); CASE(RIPEMD256); CASE(RIPEMD320);
						CASE(HAVAL128_3); CASE(HAVAL128_4); CASE(HAVAL128_5);
						CASE(HAVAL160_3); CASE(HAVAL160_4); CASE(HAVAL160_5);
						CASE(HAVAL192_3); CASE(HAVAL192_4); CASE(HAVAL192_5);
						CASE(HAVAL224_3); CASE(HAVAL224_4); CASE(HAVAL224_5);
						CASE(HAVAL256_3); CASE(HAVAL256_4); CASE(HAVAL256_5);
						CASE(MD2); CASE(PANAMA);
						CASE(SKEIN224); CASE(SKEIN256); CASE(SKEIN384); CASE(SKEIN512);
						CASE(SHA3_224); CASE(SHA3_256); CASE(SHA3_384); CASE(SHA3_512);
						CASE(KECCAK_256); CASE(KECCAK_512);
						// LARGE_HASH_EDIT_POINT
					}
				} else {
					// calls 'old' code (ossl, sorry :(  We should FIND and remove any format
					// written this way, if it is
					__possMP_DynamicFunc__crypt2_md5();
				}
			} else {
				__possMP_DynamicFunc__crypt2_md5();
				if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) {
					if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==2)
						__nonMP_DynamicFunc__SSEtoX86_switch_output2();
					__nonMP_DynamicFunc__clean_input();
					__nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16();
				}
			}
		}
	}

	// Ok, now we 'run' the script.  We simply call 1 function right after the other.
	// ALL functions are void f(void).  They use the globals:
	//    input_buf1[] input_buf2[] (requires thread safety)
	//    total_len1[] total_len2[]   (requires thread safety)
	//    crypt1[] crypt2[]           (requires thread safety)
	//    md5_unicode_convert         (requires thread safety, had to change to array)
	//    saved_key[]                 (const?)
	//    saved_key_len[]             (const)
	//    cursalt, cursalt2           (const)
	//    saltlen, saltlen2           (const)
	//    m_count                     (const)
	//    nConsts                     (const)
	//    Consts[], ConstsLen[]       (const)

	// Since this array is in a structure, we assign a simple pointer to it
	// before walking.  Trivial improvement, but every cycle counts :)
	{
#ifdef _OPENMP
	if ((curdat.pFmtMain->params.flags & FMT_OMP) == FMT_OMP) {
		int j;
		unsigned int inc = (m_count+m_ompt-1) / m_ompt;
		//printf("maxkeys=%d m_count=%d inc1=%d granularity=%d inc2=%d\n", curdat.pFmtMain->params.max_keys_per_crypt, m_count, inc, curdat.omp_granularity, ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity);
		// round the per-thread chunk up to the format's granularity
		inc = ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity;
#pragma omp parallel for shared(curdat, inc, m_count)
		for (j = 0; j < m_count; j += inc) {
			unsigned int i;
			unsigned int top=j+inc;
			/* The last block may 'appear' to have more keys than we have in the
			   entire buffer space.  This is due to the granularity.  If so,
			   reduce that last one to stop at end of our buffers.  NOT doing
			   this is causes a huge buffer overflow.  */
			if (top > curdat.pFmtMain->params.max_keys_per_crypt)
				top = curdat.pFmtMain->params.max_keys_per_crypt;

			// we now run a full script in this thread, using only a subset of
			// the data, from [j,top)  The next thread will run from [top,top+inc)
			// each thread will take the next inc values, until we get to m_count
			for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
				(*(curdat.dynamic_FUNCTIONS[i]))(j,top,omp_get_thread_num());
		}
	} else {
		unsigned int i;

		// same code (almost), but without the threads.
		for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i)
			(*(curdat.dynamic_FUNCTIONS[i]))(0,m_count,0);
	}
#else
	unsigned int i;

	for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) {
		(*(curdat.dynamic_FUNCTIONS[i]))();
#if 0
		// Dump state (for debugging help)
		if (i==0) printf("\npassword=%.*s\n", saved_key_len[0], saved_key[0]);
		printf("\nState after function: %s\n", dynamic_Find_Function_Name(curdat.dynamic_FUNCTIONS[i]));
		// dump input 1
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("input_buf[0]", input_buf[0].c, 64, 0);
		dump_stuff_mmx_msg("input_buf[1]", input_buf[0].c, 64, 1);
		dump_stuff_mmx_msg("input_buf[2]", input_buf[0].c, 64, 2);
		dump_stuff_mmx_msg("input_buf[3]", input_buf[0].c, 64, 3);
#endif
		printf("input_buf86[0] : %*.*s\n", total_len_X86[0],total_len_X86[0],input_buf_X86[0].x1.b);
		printf("input_buf86[1] : %*.*s\n", total_len_X86[1],total_len_X86[1],input_buf_X86[1].x1.b);
		printf("input_buf86[2] : %*.*s\n", total_len_X86[2],total_len_X86[2],input_buf_X86[2].x1.b);
		printf("input_buf86[3] : %*.*s\n", total_len_X86[3],total_len_X86[3],input_buf_X86[3].x1.b);
		// dump crypt 1
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("crypt_key[0]", crypt_key[0].c, 16, 0);
		dump_stuff_mmx_msg("crypt_key[1]", crypt_key[0].c, 16, 1);
		dump_stuff_mmx_msg("crypt_key[2]", crypt_key[0].c, 16, 2);
		dump_stuff_mmx_msg("crypt_key[3]", crypt_key[0].c, 16, 3);
#endif
		dump_stuff_be_msg("crypt_key_X86[0]", crypt_key_X86[0].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[1]", crypt_key_X86[1].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[2]", crypt_key_X86[2].x1.b, 16);
		dump_stuff_be_msg("crypt_key_X86[3]", crypt_key_X86[3].x1.b, 16);
		// dump input 2
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("input_buf2[0]", input_buf2[0].c, 64, 0);
		dump_stuff_mmx_msg("input_buf2[1]", input_buf2[0].c, 64, 1);
		dump_stuff_mmx_msg("input_buf2[2]", input_buf2[0].c, 64, 2);
		dump_stuff_mmx_msg("input_buf2[3]", input_buf2[0].c, 64, 3);
#endif
		printf("input2_buf86[0] : %*.*s\n", total_len2_X86[0],total_len2_X86[0],input_buf2_X86[0].x1.b);
		printf("input2_buf86[1] : %*.*s\n", total_len2_X86[1],total_len2_X86[1],input_buf2_X86[1].x1.b);
		printf("input2_buf86[2] : %*.*s\n", total_len2_X86[2],total_len2_X86[2],input_buf2_X86[2].x1.b);
		printf("input2_buf86[3] : %*.*s\n", total_len2_X86[3],total_len2_X86[3],input_buf2_X86[3].x1.b);
		// dump crypt 2
#ifdef SIMD_COEF_32
		dump_stuff_mmx_msg("crypt_key2[0]", crypt_key2[0].c, 16, 0);
		dump_stuff_mmx_msg("crypt_key2[1]", crypt_key2[0].c, 16, 1);
		dump_stuff_mmx_msg("crypt_key2[2]", crypt_key2[0].c, 16, 2);
		dump_stuff_mmx_msg("crypt_key2[3]", crypt_key2[0].c, 16, 3);
#endif
		dump_stuff_be_msg("crypt_key2_X86[0]", crypt_key2_X86[0].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[1]", crypt_key2_X86[1].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[2]", crypt_key2_X86[2].x1.b, 16);
		dump_stuff_be_msg("crypt_key2_X86[3]", crypt_key2_X86[3].x1.b, 16);
#endif
	}
#endif
	}
	return m_count;
}

/*********************************************************************************
 * 'normal' hashing functions
 *********************************************************************************/
extern char *MD5_DumpHexStr(void *p);

#if !ARCH_LITTLE_ENDIAN
// the lower 8 bits is zero on the binary (but filled in on the hash).  
We need to dump the low 8
// (big-endian builds only: binary/hash bucket functions shift off those 8 bits)
static int binary_hash_0_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_0; }
static int binary_hash_1_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_1; }
static int binary_hash_2_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_2; }
static int binary_hash_3_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_3; }
static int binary_hash_4_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_4; }
static int binary_hash_5_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_5; }
static int get_hash_0_64x4(int index) {
#if MD5_X2
	if (index & 1)
		return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_0;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_0;}
static int get_hash_1_64x4(int index) {
#if MD5_X2
	if (index & 1)
		return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_1;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_1;}
static int get_hash_2_64x4(int index) {
#if MD5_X2
	if (index & 1)
		return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_2;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_2;}
static int get_hash_3_64x4(int index) {
#if MD5_X2
	if (index & 1)
		return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_3;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_3;}
static int get_hash_4_64x4(int index) {
#if MD5_X2
	if (index & 1)
		return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_4;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_4;}
static int get_hash_5_64x4(int index) {
#if MD5_X2
	if (index & 1)
		return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_5;
#endif
	return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_5;}
#endif

/* get_hash_0..6: return the first hash word of the computed crypt for 'index',
   masked down to the requested bucket size (PH_MASK_n). */
static int get_hash_0(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_0;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_0;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_0;
}

static int get_hash_1(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_1;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_1;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_1;
}

static int get_hash_2(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_2;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_2;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_2;
}

static int get_hash_3(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_3;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_3;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_3;
}

static int get_hash_4(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_4;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_4;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_4;
}

static int get_hash_5(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_5;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_5;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_5;
}

static int get_hash_6(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_6;
	}
#endif
#if MD5_X2
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_6;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_6;
}

/************************************************************************
 * We now fully handle all hashing of salts, here in the format. We
 * return a pointer ot an allocated salt record. Thus, we search all
 * of the salt records, looking for the same salt. If we find it, we
 * want to return THAT pointer, and not allocate a new pointer.
 * This works great, but forces us to do salt comparision here.
 ***********************************************************************/
#define DYNA_SALT_HASH_BITS SALT_HASH_LOG
#define DYNA_SALT_HASH_SIZE (1<<DYNA_SALT_HASH_BITS)
#define DYNA_SALT_HASH_MOD  (DYNA_SALT_HASH_SIZE-1)

// singly-linked bucket entry: one interned salt (len + bytes)
typedef struct dyna_salt_list_entry {
	struct dyna_salt_list_entry *next;
	unsigned len;
	unsigned char *salt;
} dyna_salt_list_entry;
typedef struct {
	dyna_salt_list_entry *head, *tail;
	int count;
} dyna_salt_list_main;
typedef struct {
	dyna_salt_list_main List;
} SaltHashTab_t;
// hash table + bump-allocated pools for entries and salt bytes
static SaltHashTab_t        *SaltHashTab=NULL;
static dyna_salt_list_entry *pSaltHashData=NULL, *pSaltHashDataNext=NULL;
static int                   dyna_salt_list_count=0;
static unsigned char        *pSaltDataBuf=NULL, *pNextSaltDataBuf=NULL;
static int                   nSaltDataBuf=0;

/* Append a new salt to bucket 'idx', copying its bytes into the pooled
   buffer; grows the entry/data pools in large chunks as needed.
   NOTE(review): when the data pool is exhausted a fresh 0x60000 block is
   started and the old remainder is abandoned (tiny-alloc pool owns it). */
static unsigned char *AddSaltHash(unsigned char *salt, unsigned int len, unsigned int idx)
{
	unsigned char *pRet;
	if (dyna_salt_list_count == 0) {
		pSaltHashDataNext = pSaltHashData = mem_calloc_tiny(sizeof(dyna_salt_list_entry) * 25000, MEM_ALIGN_WORD);
		dyna_salt_list_count = 25000;
	}
	if (nSaltDataBuf < len) {
		pSaltDataBuf = pNextSaltDataBuf = mem_alloc_tiny(0x60000, MEM_ALIGN_NONE);
		nSaltDataBuf = 0x60000;
	}
	pRet = pNextSaltDataBuf;
	pSaltHashDataNext->salt = pNextSaltDataBuf;
	memcpy(pSaltHashDataNext->salt, salt, len);
	pSaltHashDataNext->len = len;
	pNextSaltDataBuf += len;
	nSaltDataBuf -= len;

	if (SaltHashTab[idx].List.count == 0)
		SaltHashTab[idx].List.tail = SaltHashTab[idx].List.head = pSaltHashDataNext;
	else {
		SaltHashTab[idx].List.tail->next = pSaltHashDataNext;
		SaltHashTab[idx].List.tail = pSaltHashDataNext;
	}
	++SaltHashTab[idx].List.count;
	++pSaltHashDataNext;
	--dyna_salt_list_count;
	return pRet;
}

/* Intern a salt: return the existing pooled copy if an equal salt is already
   in the CRC-selected bucket, otherwise add and return the new copy. */
static unsigned char *FindSaltHash(unsigned char *salt, unsigned int len, CRC32_t crc)
{
	unsigned int idx = crc & DYNA_SALT_HASH_MOD;
	dyna_salt_list_entry *p;
	if (!SaltHashTab)
		SaltHashTab = mem_calloc_tiny(sizeof(SaltHashTab_t) * DYNA_SALT_HASH_SIZE, MEM_ALIGN_WORD);

	if (!SaltHashTab[idx].List.count) {
		return AddSaltHash(salt, len, idx);
	}
	// Ok, we have some salts in this hash list.  Now walk the list, searching for an EQUAL salt.
	p = SaltHashTab[idx].List.head;
	while (p) {
		if (len == p->len && !memcmp((char*)salt, (char*)p->salt, len)) {
			return p->salt;  // found it!  return this one, so we do not allocate another.
		}
		p = p->next;
	}
	return AddSaltHash(salt, len, idx);
}

/* CRC32 the salt bytes and intern the salt via FindSaltHash. */
static unsigned char *HashSalt(unsigned char *salt, unsigned int len)
{
	CRC32_t crc = 0xffffffff, i;
	unsigned char *ret_hash;

	// compute the hash.
	for (i = 0; i < len; ++i)
		crc = jtr_crc32(crc,salt[i]);
	crc = ~crc;
	ret_hash = FindSaltHash(salt, len, crc);
	return ret_hash;
}

/* If p starts with "HEX$", decode the hex payload in place (result overwrites
   the buffer from the front) and return the decoded length; otherwise return
   len unchanged.  In-place decode is safe because output is always shorter. */
static int ConvertFromHex(unsigned char *p, int len)
{
	unsigned char *cp;
	unsigned int i, x;
	if (!p || memcmp(p, "HEX$", 4))
		return len;
	// Ok, do a convert, and return 'new' len.
	len -= 4;
	len >>= 1;
	cp = p;
	x = len;
	for (i=4; x; --x, i+= 2) {
		*cp++ = atoi16[ARCH_INDEX(p[i])]*16 + atoi16[ARCH_INDEX(p[i+1])];
	}
	*cp = 0;
	return len;
}

static unsigned int salt_external_to_internal_convert(unsigned char *extern_salt, unsigned char *Buffer)
{
	// Ok, we get this:  extern_salt = salt_data$$2salt2$$Uuser ...  where anything can be missing or in any order
	// the any order has 1 exception of salt_data MUST be first.  So if we get $$2salt2, then we know there is no salt-1 value.
	unsigned char *salt2=0, *userid=0, *Flds[10];
	int i, nsalt2=0, nuserid=0, nFlds[10]={0,0,0,0,0,0,0,0,0,0};
	unsigned int len = strlen((char*)extern_salt), bit;
	unsigned int bit_array=0;
	unsigned int the_real_len = 6;  // 2 bytes base-8 length, and 4 bytes base-8 bitmap.

	// work from back of string to front, looking for the $$X signatures.
	for (i = len-3; i >= 0; --i) {
		if (extern_salt[i] == '$' && extern_salt[i+1] == '$') {
			// a 'likely' extra salt value.
			switch(extern_salt[i+2]) {
				case '2':
					if (curdat.b2Salts) {
						// a second salt: record it, terminate the string here
						salt2 = &extern_salt[i+3];
						nsalt2 = strlen((char*)salt2);
						nsalt2 = ConvertFromHex(salt2, nsalt2);
						extern_salt[i] = 0;
						bit_array |= 1;
						the_real_len += (nsalt2+1);
					}
					break;
				case 'U':
					if (curdat.nUserName) {
						// user name field
						userid = &extern_salt[i+3];
						nuserid = strlen((char*)userid);
						nuserid = ConvertFromHex(userid, nuserid);
						extern_salt[i] = 0;
						bit_array |= 2;
						the_real_len += (nuserid+1);
					}
					break;
				case 'F': {
					// numbered field $$F0..$$F9, only if enabled in FldMask
					if (extern_salt[i+3] >= '0' && extern_salt[i+3] <= '9') {
						if (curdat.FldMask && (curdat.FldMask & (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) == (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) {
							Flds[extern_salt[i+3]-'0'] = &extern_salt[i+4];
							nFlds[extern_salt[i+3]-'0'] = strlen((char*)(Flds[extern_salt[i+3]-'0']));
							nFlds[extern_salt[i+3]-'0'] = ConvertFromHex(Flds[extern_salt[i+3]-'0'], nFlds[extern_salt[i+3]-'0']);
							extern_salt[i] = 0;
							bit_array |= (1<<(2+extern_salt[i+3]-'0'));
							the_real_len += (nFlds[extern_salt[i+3]-'0']+1);
						}
						break;
					}
				}
			}
		}
	}
	// We have now ripped the data apart.  
Now put it into Buffer, in proper ORDER // Length of salt (salt1) These 2 are stored as base-8 numbers. len = strlen((char*)extern_salt); len = ConvertFromHex(extern_salt, len); the_real_len += len; *Buffer++ = (len>>3) + '0'; *Buffer++ = (len&7) + '0'; // bit array *Buffer++ = (bit_array>>9) + '0'; *Buffer++ = ((bit_array>>6)&7) + '0'; *Buffer++ = ((bit_array>>3)&7) + '0'; *Buffer++ = (bit_array&7) + '0'; memcpy((char*)Buffer, (char*)extern_salt, len); Buffer += len; if (!bit_array) return the_real_len; if (nsalt2) { *Buffer++ = nsalt2; memcpy((char*)Buffer, (char*)salt2, nsalt2); Buffer += nsalt2; bit_array &= ~1; if (!bit_array) return the_real_len; } if (nuserid) { *Buffer++ = nuserid; memcpy((char*)Buffer, (char*)userid, nuserid); if (curdat.nUserName==2) { Buffer[nuserid] = 0; strupr((char*)Buffer); } else if (curdat.nUserName==2) { Buffer[nuserid] = 0; strlwr((char*)Buffer); } Buffer += nuserid; bit_array &= ~2; if (!bit_array) return the_real_len; } bit = 4; for (i = 0; i < 10; ++i, bit<<=1) { if (nFlds[i]) { *Buffer++ = nFlds[i]; memcpy((char*)Buffer, (char*)(Flds[i]), nFlds[i]); Buffer += nFlds[i]; bit_array &= ~bit; if (!bit_array) return the_real_len; } } return the_real_len; } /********************************************************************************* * This salt function has been TOTALLY re-written. Now, we do these things: * 1. convert from external format ($salt$$Uuser$$2HEX$salt2_in_hex, etc, into * our internal format. Our internal format is 2 base-8 numbers (2 digit and 4 * digit), followed by the 'raw' salt bytes, followed by pascal strings of any * other special salt values (salt2, user, flields 0 to 9). The first 2 digit * base 8 number is the length of the binary bytes of the 'real' salt. The * 2nd base-8 4 digit number, is a bit mask of what 'extra' salt types are * contained. * 2. We allocate and 'own' the salt buffers here, so that: * 3. We detect duplicate salts. 
NOTE, we have normalized the salts, so 2 salts that
 * appear different (external format), appear exactly the same on internal format.
 * Thus, we dupe remove them here.
 * 4. We allocation storage for the salts. The ONLY thing we return to john, is
 * a 4 (or 8 byte in 64 bit builds) pointer to the salt.  Thus, when we find
 * a dupe, we do not have to allocate ANY memory, and simply return the pointer
 * to the original salt (which is the same as the one we are working on now).
 *
 * this is much more complex, however, it allows us to use much less memory, to
 * have the set_salt function operate VERY quickly (all processing is done here).
 * It also allows john load time to happen FASTER (yes faster), that it was happening
 * due to smaller memory footprint, and john's external salt collision to have
 * less work to do.  The memory footprint was also reduced, because now we store
 * JUST the require memory, and a pointer. Before, often we stored a LOT of memory
 * for many format types.  For a few types, we do use more memory with this method
 * than before, but for more the memory usage is way down.
 *********************************************************************************/
/*
 * get_salt: parse the salt out of 'ciphertext', normalize it to internal
 * format, dedup-store it, and return a pointer-to-pointer (union_x.salt_p
 * holds the stored salt's address).  Handles: re-loading curdat when the
 * wrong dynamic subtype is active, optional $$U/$$2/$$Fx prefixes at the
 * salt offset, and 'salt_as_hex' pre-hashing of the salt with one of many
 * hash primitives (switch below).
 */
static void *get_salt(char *ciphertext)
{
	char Salt[SALT_SIZE+1], saltIntBuf[SALT_SIZE+1];
	int off, possible_neg_one=0;
	unsigned char *saltp;
	unsigned int the_real_len;
	static union x {
		unsigned char salt_p[sizeof(unsigned char*)];
		ARCH_WORD p[1];
	} union_x;

	/* unsalted format: return a zeroed pointer slot */
	if ( (curdat.pSetup->flags&MGF_SALTED) == 0) {
		memset(union_x.salt_p, 0, sizeof(union_x.salt_p));
		return union_x.salt_p;
	}

	memset(Salt, 0, SALT_SIZE+1);

	// Ok, see if the wrong dynamic type is loaded (such as the 'last' dynamic type).
	if (!strncmp(ciphertext, "$dynamic_", 9)) {
		char *cp1 = &ciphertext[9];
		char *cp2 = &curdat.dynamic_WHICH_TYPE_SIG[9];
		while (*cp2 && *cp2 == *cp1) {
			++cp1; ++cp2;
		}
		if (*cp2) {
			/* signature mismatch: pull curdat for the subformat named in the hash */
			char subformat[17];
			struct fmt_main *pFmtLocal;
			int nFmtNum;
			memcpy(subformat, ciphertext, 16);
			subformat[16] = 0;
			cp2 = &subformat[9];
			while (*cp2 && *cp2 != '$')
				++cp2;
			*cp2 = 0;
			nFmtNum = -1;
			sscanf(subformat, "$dynamic_%d", &nFmtNum);
			if (nFmtNum==-1)
				return union_x.salt_p;
			pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
			memcpy(&curdat, pFmtLocal->private.data, sizeof(private_subformat_data));
		}
	}

	if (curdat.dynamic_FIXED_SALT_SIZE==0 && !curdat.nUserName && !curdat.FldMask)
		return union_x.salt_p;
	if (!strncmp(ciphertext, "$dynamic_", 9))
		off=curdat.dynamic_SALT_OFFSET;
	else
		off=curdat.dynamic_SALT_OFFSET-strlen(curdat.dynamic_WHICH_TYPE_SIG);

	/* if the salt itself is empty and the hash starts straight at a $$U/$$2/$$Fx
	   marker, back the offset up one so the marker is kept */
	if (ciphertext[off] == '$') {
		if (ciphertext[off+1]=='U' && curdat.nUserName)
			possible_neg_one = -1;
		else if (ciphertext[off+1]=='2' && curdat.b2Salts)
			possible_neg_one = -1;
		else if (ciphertext[off+1]=='F' && ciphertext[off+2]>='0' && ciphertext[off+2]<='9' && curdat.FldMask) {
			if ((curdat.FldMask & (MGF_FLDx_BIT<<(ciphertext[off+2]-'0'))) == (MGF_FLDx_BIT<<(ciphertext[off+2]-'0')))
				possible_neg_one = -1;
		}
	}
	strnzcpy(Salt, &ciphertext[off + possible_neg_one], SALT_SIZE);

	if (curdat.dynamic_salt_as_hex) {
		/* salt must first be hashed, and the hex of that hash becomes the salt */
		unsigned char Buf[128];
		unsigned int slen=strlen(Salt);
		switch (curdat.dynamic_salt_as_hex_format_type) {
			// TODO: Come up with some way to put these into a CASE(HASH) #define
#define SPH_CASE(H,F,S) case MGF__##H: {sph_##F##_context c;sph_##F##_init(&c);sph_##F(&c,(const unsigned char*)Salt,slen);sph_##F##_close(&c,Buf); \
	memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
#define OSSL_CASE(H,C,S) case MGF__##H: {C##_CTX c;H##_Init(&c);H##_Update(&c,Salt,slen);H##_Final(Buf,&c); \
	memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
#define KECCAK_CASE(H,S) case MGF__##H: {KECCAK_CTX c;H##_Init(&c);KECCAK_Update(&c,(BitSequence*)Salt,slen);KECCAK_Final(Buf,&c); \
	memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; }
			case MGF__MD5:
			{
				// Do not 'worry' about SSE/MMX,  Only do 'generic' md5.  This is ONLY done
				// at the start of the run.  We will NEVER see this run, once john starts.
				MD5_CTX ctx;
				int i;
				char *cpo;
				MD5_Init(&ctx);
				if (curdat.dynamic_salt_as_hex & 0x100) {
					/* 0x100 flag: hash the salt as UTF-16LE (interleave NUL bytes) */
					char *s2 = mem_alloc(slen*2+1);
					for (i = 0; i < slen; ++i) {
						s2[i<<1] = Salt[i];
						s2[(i<<1)+1] = 0;
					}
					MD5_Update(&ctx, s2, slen*2);
					MEM_FREE(s2);
				} else
					MD5_Update(&ctx, Salt, slen);
				MD5_Final(Buf, &ctx);
				if ( (curdat.dynamic_salt_as_hex&3) == 2) {
					/* keep original salt, append hash as a $$2 secondary salt */
					strcat(Salt, "$$2");
					cpo = &Salt[slen+3];
				}
				else {
					cpo = Salt;
					memset(Salt, 0, SALT_SIZE+1);
				}
				base64_convert(Buf, e_b64_raw, 16, cpo, e_b64_hex, SALT_SIZE, 0, 0);
				break;
			}
			OSSL_CASE(MD4,MD4,16)
			OSSL_CASE(SHA1,SHA,20)
			OSSL_CASE(SHA224,SHA256,28)
			OSSL_CASE(SHA256,SHA256,32)
			OSSL_CASE(SHA384,SHA512,48)
			OSSL_CASE(SHA512,SHA512,64)
			OSSL_CASE(WHIRLPOOL,WHIRLPOOL,64)
			case MGF__GOST:
			{
				gost_ctx ctx;
				john_gost_init(&ctx);
				john_gost_update(&ctx, (const unsigned char*)Salt, slen);
				john_gost_final(&ctx, (unsigned char*)Buf);
				memset(Salt, 0, SALT_SIZE+1);
				base64_convert(Buf, e_b64_raw, 32, Salt, e_b64_hex, SALT_SIZE, 0, 0);
				break;
			}
			SPH_CASE(Tiger,tiger,24)
			SPH_CASE(RIPEMD128,ripemd128,16)
			SPH_CASE(RIPEMD160,ripemd160,20)
			SPH_CASE(RIPEMD256,ripemd256,32)
			SPH_CASE(RIPEMD320,ripemd320,40)
			SPH_CASE(HAVAL128_3,haval128_3,16)
			SPH_CASE(HAVAL128_4,haval128_4,16)
			SPH_CASE(HAVAL128_5,haval128_5,16)
			SPH_CASE(HAVAL160_3,haval160_3,20)
			SPH_CASE(HAVAL160_4,haval160_4,20)
			SPH_CASE(HAVAL160_5,haval160_5,20)
			SPH_CASE(HAVAL192_3,haval192_3,24)
			SPH_CASE(HAVAL192_4,haval192_4,24)
			SPH_CASE(HAVAL192_5,haval192_5,24)
			SPH_CASE(HAVAL224_3,haval224_3,28)
			SPH_CASE(HAVAL224_4,haval224_4,28)
			SPH_CASE(HAVAL224_5,haval224_5,28)
			SPH_CASE(HAVAL256_3,haval256_3,32)
			SPH_CASE(HAVAL256_4,haval256_4,32)
SPH_CASE(HAVAL256_5,haval256_5,32) SPH_CASE(MD2,md2,16) SPH_CASE(PANAMA,panama,32) SPH_CASE(SKEIN224,skein224,28) SPH_CASE(SKEIN256,skein256,32) SPH_CASE(SKEIN384,skein384,48) SPH_CASE(SKEIN512,skein512,64) KECCAK_CASE(SHA3_224,28) KECCAK_CASE(SHA3_256,32) KECCAK_CASE(SHA3_384,48) KECCAK_CASE(SHA3_512,64) KECCAK_CASE(KECCAK_256,32) KECCAK_CASE(KECCAK_512,64) // LARGE_HASH_EDIT_POINT default: { error_msg("Invalid dynamic flags seen. Data type not yet defined\n"); } } } the_real_len = salt_external_to_internal_convert((unsigned char*)Salt, (unsigned char*)saltIntBuf); // Now convert this into a stored salt, or find the 'already' stored same salt. saltp = HashSalt((unsigned char*)saltIntBuf, the_real_len); memcpy(union_x.salt_p, &saltp, sizeof(saltp)); return union_x.salt_p; } /********************************************************************************* * Now our salt is returned only as a pointer. We *********************************************************************************/ static int salt_hash(void *salt) { unsigned long H; if (!salt) return 0; if ( (curdat.pSetup->flags&MGF_SALTED) == 0) return 0; // salt is now a pointer, but WORD aligned. We remove that word alingment, and simply use the next bits #if ARCH_ALLOWS_UNALIGNED H = *((unsigned long*)salt); #else memcpy(&H, salt, 8); #endif // Mix up the pointer value (H^(H>>9)) so that if we have a fixed sized allocation // that things do get 'stirred' up better. 
return ( (H^(H>>9)) & (SALT_HASH_SIZE-1) ); } static unsigned dynamic_this_salt_length(const void *v) { const unsigned char *s = (unsigned char*)v; unsigned l = *s++ - '0'; unsigned bits; l <<= 3; l += *s++ - '0'; #if ARCH_ALLOWS_UNALIGNED if (*((uint32_t*)s) == 0x30303030) #else if (!memcmp(s, "0000", 4)) #endif return l; bits = *s++ - '0'; bits <<= 3; bits += *s++ - '0'; bits <<= 3; bits += *s++ - '0'; bits <<= 3; bits += *s++ - '0'; s += l; while(bits) { if (bits & 1) { l += *s; s += *s; ++s; } bits >>= 1; } return l; } /* * dyna compare is required, to get all the shortest * salt strings first, then the next longer, then the * next, and finally the longest. Without this change * there are many dyna formats which will miss finding * hashes, because old dirty salt information gets left * over, blowing the next runs. There are many formats * which try to not clear buffers if they do not need * to, BUT this only works if salts are taken shortest * to longest. This sort builds the list of salts that way */ static int salt_compare(const void *x, const void *y) { /* this is all that is needed in dyna salt_compare(). Dyna is a pointer to a string, NOT the actual string. 
The first 2 bytes of string are length (base 8 ascii) */ const char *X = *((const char**)x); const char *Y = *((const char**)y); int l1, l2, l; if (*X<*Y) return -1; if (*X>*Y) return 1; if (X[1]<Y[1]) return -1; if (X[1]>Y[1]) return 1; // we had to make the salt order 100% deterministic, so that intersalt-restore l = l1 = dynamic_this_salt_length(X); l2 = dynamic_this_salt_length(Y); if (l2 < l) l = l2; l = memcmp(&X[6], &Y[6], l); if (l) return l; if (l1==l2) return 0; if (l1 > l2) return 1; return -1; } void dynamic_salt_md5(struct db_salt *s) { MD5_CTX ctx; int len; const char *S = *((const char**)s->salt); MD5_Init(&ctx); len = dynamic_this_salt_length(S); MD5_Update(&ctx, S + 6, len); MD5_Final((unsigned char*)(s->salt_md5), &ctx); } /********************************************************************************* * Gets the binary value from a base-16 hash. *********************************************************************************/ static void *get_binary(char *_ciphertext) { static char *realcipher; unsigned int i; char *ciphertext = _ciphertext; if (!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE_SHA, MEM_ALIGN_WORD); if (!strncmp(_ciphertext, "$dynamic_", 9)) { ciphertext += 9; while (*ciphertext++ != '$') ; } for (i=0;i<BINARY_SIZE;i++) { realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; } return (void *)realcipher; } // NOTE NOTE NOTE, we have currently ONLY implemented a non-salted function!!! 
static char *source(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 16; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_20_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 20; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_28_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 28; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_32_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 32; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_40_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 40; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_48_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 48; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char 
*source_64_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 64; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } /********************************************************************************* * Gets the binary value from a base-64 hash *********************************************************************************/ static void * binary_b64m(char *ciphertext) { unsigned int i; static unsigned char *b; char *pos; if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } i = base64_valid_length(pos, e_b64_mime, 0, 0); base64_convert(pos, e_b64_mime, i, b, e_b64_raw, 64+3, 0, 0); //printf("\nciphertext=%s\n", ciphertext); //dump_stuff_msg("binary", b, 16); return b; } static void * binary_b64(char *ciphertext) { unsigned int i; static unsigned char *b; char *pos; if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } i = base64_valid_length(pos, e_b64_crypt, 0, 0); base64_convert(pos, e_b64_cryptBS, i, b, e_b64_raw, 64+3, 0, 0); //printf("\nciphertext=%s\n", ciphertext); //dump_stuff_msg("binary", b, 16); return b; } static void * binary_b64b(char *ciphertext) { unsigned int i; static unsigned char *b; char *pos; if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } i = base64_valid_length(pos, e_b64_crypt, 0, 0); base64_convert(pos, e_b64_crypt, i, b, e_b64_raw, 64+3, 0, 0); //printf("\nciphertext=%s\n", ciphertext); //dump_stuff_msg("binary", b, 16); return b; } #define TO_BINARY(b1, b2, b3) \ value = \ (MD5_word)atoi64[ARCH_INDEX(pos[0])] | \ ((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6) | \ 
((MD5_word)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((MD5_word)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ b[b1] = value >> 16; \ b[b2] = value >> 8; \ b[b3] = value; static void * binary_b64a(char *ciphertext) { static unsigned char *b; char *pos; MD5_word value; if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } TO_BINARY(0, 6, 12); TO_BINARY(1, 7, 13); TO_BINARY(2, 8, 14); TO_BINARY(3, 9, 15); TO_BINARY(4, 10, 5); b[11] = (MD5_word)atoi64[ARCH_INDEX(pos[0])] | ((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6); MD5_swap((MD5_word*)b,(MD5_word*)b, 4); return b; } /********************************************************************************* * Gets the binary value from a base-64 hash (such as cisco PIX) *********************************************************************************/ static void * binary_b64_4x6(char *ciphertext) { static uint32_t *b; unsigned int i; char *pos; if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } for (i = 0; i < 4; i++) { b[i] = atoi64[ARCH_INDEX(pos[i*4 + 0])] + (atoi64[ARCH_INDEX(pos[i*4 + 1])] << 6) + (atoi64[ARCH_INDEX(pos[i*4 + 2])] << 12) + (atoi64[ARCH_INDEX(pos[i*4 + 3])] << 18); } MD5_swap(b,b, 4); return (void *)b; } /********************************************************************************* * Here is the main mdg_generic fmt_main. NOTE in its default settings, it is * ready to handle base-16 hashes. 
*********************************************************************************/ static struct fmt_main fmt_Dynamic = { { FORMAT_LABEL, FORMAT_NAME, #ifdef SIMD_COEF_32 ALGORITHM_NAME, #else ALGORITHM_NAME_X86, #endif BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, #ifdef SIMD_COEF_32 PLAINTEXT_LENGTH, #else PLAINTEXT_LENGTH_X86, #endif BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #else MIN_KEYS_PER_CRYPT_X86, MAX_KEYS_PER_CRYPT_X86, #endif #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, { NULL }, { NULL }, dynamic_tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, salt_compare, set_salt, set_key, get_key, clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; /************************************************************** ************************************************************** ************************************************************** ************************************************************** * These are the md5 'primitive' functions that are used by * the build-in expressions, and by the expression generator * They load passwords, salts, user ids, do crypts, convert * crypts into base-16, etc. They are pretty encompassing, * and have been found to be able to do most anything with * a standard 'base-16' md5 hash, salted or unsalted that * fits a 'simple' php style expression. 
**************************************************************
 **************************************************************
 **************************************************************
 *************************************************************/

/* Build the byte -> 2-hex-char lookup tables (upper and lower case). */
static void Dynamic_Load_itoa16_w2()
{
	char buf[3];
	unsigned int i;
	for (i = 0; i < 256; ++i) {
		sprintf(buf, "%X%X", i>>4, i&0xF);
		memcpy(&(itoa16_w2_u[i]), buf, 2);
		sprintf(buf, "%x%x", i>>4, i&0xF);
		memcpy(&(itoa16_w2_l[i]), buf, 2);
	}
}

#ifdef SIMD_COEF_32
/**************************************************************
 **************************************************************
 *  Here are some 'helpers' to our helpers, when it comes to
 *  loading data into the mmx/sse buffers.  We have several
 *  of these common helper functions, and use them in 'most'
 *  of the helper primitives, instead of having the same
 *  code being inlined in each of them.
 **************************************************************
 *************************************************************/

/* Write the 32-hex-char rendering of the 16-byte crypt in lane idx_mod to
 * the start of that lane's interleaved input buffer, then append the 0x80
 * padding byte.  Fully unrolled: 8 x (two 16-bit hex-pair stores). */
static void __SSE_append_output_base16_to_input(uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #3
	// 5955K (core2, $dynamic_2$)
	// 1565K (core2, $dynamic_1006$)
	// 3381K (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
	unsigned short *IPBw = (unsigned short*)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;

	*IPBw = 0x80;
#undef inc
}

/* Same as __SSE_append_output_base16_to_input(), but does NOT write the
 * trailing 0x80 (overwrites existing data; padding already in place). */
static void __SSE_overwrite_output_base16_to_input(uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #3
	// 5955K (core2, $dynamic_2$)
	// 1565K (core2, $dynamic_1006$)
	// 3381K (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
	unsigned short *IPBw = (unsigned short *)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
#undef inc
}

/* Append 32 hex chars at insertion point 'ip' when ip is 2 mod 4: the hex
 * output straddles 32-bit lanes, so each dword gets the high half of one
 * hex pair and the low half of the next. */
static void __SSE_append_output_base16_to_input_semi_aligned_2(unsigned int ip, uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #1
	// 9586k/4740k (core2, $dynamic_9$)
	// 5113k/4382k (core2,$dynamic_10$)
	//  (ath64, $dynamic_9$)
	//  (ath64, $dynamic_10$)
#define inc SIMD_COEF_32
#define incCRY ((SIMD_COEF_32 - 1) * 4)
	// Ok, here we are 1/2 off. We are starting in the 'middle' of a DWORD (and end
	// in the middle of the last one).

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*SIMD_COEF_32;
	CRY += (idx_mod<<2);

	// first byte handled here.
	*IPBdw &= 0xFFFF;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);

	// Add the 0x80 at the proper location (offset 0x21)
	*IPBdw |= 0x800000;
#undef inc
#undef incCRY
}

/* Append 32 hex chars when the insertion point is dword-aligned: each
 * 32-bit store carries one full hex pair (2 chars). */
static void __SSE_append_output_base16_to_input_semi_aligned_0(unsigned int ip, uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #2
	// 6083k (core2, $dynamic_2$)
	// 1590K (core2, $dynamic_1006$)
	// 3537K (ath64, $dynamic_2$)
	// 890.3K (ath64, $dynamic_1006$)
#undef inc
#define inc SIMD_COEF_32
#define incCRY (4*SIMD_COEF_32-2)

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*SIMD_COEF_32;

	CRY += (idx_mod<<2);

	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
// CRY += (inc*3)+2; CRY += incCRY; *IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]); IPBdw += inc; CRY += 2; *IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]); // Add the 0x80 at the proper location (offset 0x21) IPBdw += inc; *IPBdw = 0x80; #undef inc #undef incCRY } static void __SSE_append_string_to_input_unicode(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80) { unsigned char *cpO; #if ARCH_LITTLE_ENDIAN // if big-endian, we gain nothing from this function (since we would have to byte swap) if (len>1&&!(bf_ptr&1)) { unsigned int w32_cnt; if (bf_ptr&2) { cpO = &IPB[GETPOS(bf_ptr, idx_mod)]; bf_ptr += 2; *cpO = *cp++; cpO[1] = 0; --len; } w32_cnt = len>>1; if (w32_cnt) { uint32_t *wpO; wpO = (uint32_t*)&IPB[GETPOS(bf_ptr, idx_mod)]; len -= (w32_cnt<<1); bf_ptr += (w32_cnt<<2); do { uint32_t x = 0; x = cp[1]; x <<= 16; x += cp[0]; *wpO = x; cp += 2; wpO += SIMD_COEF_32; } while (--w32_cnt); } } #endif cpO = &IPB[GETPOS(bf_ptr, idx_mod)]; while (len--) { *cpO++ = *cp++; if ( ((++bf_ptr)&3) == 0) cpO += ((SIMD_COEF_32-1)*4); *cpO++ = 0; if ( ((++bf_ptr)&3) == 0) cpO += ((SIMD_COEF_32-1)*4); } if (bUpdate0x80) *cpO = 0x80; } static void __SSE_append_string_to_input(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80) { unsigned char *cpO; // if our insertion point is on an 'even' DWORD, then we use DWORD * copying, as long as we can // This provides quite a nice speedup. 
#if ARCH_LITTLE_ENDIAN && ARCH_ALLOWS_UNALIGNED
	// if big-endian, we gain nothing from this function (since we would have to byte swap)
	if (len>3&&(bf_ptr&3))
	{
		// copy bytewise until bf_ptr reaches a dword boundary (or data runs out)
		cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
		while (len--)
		{
			*cpO++ = *cp++;
			if ( ((++bf_ptr)&3) == 0) {
				if (!len) {
					if (bUpdate0x80)
						*cpO = 0x80;
					return;
				}
				break;
			}
		}
	}
	if (len>3&&!(bf_ptr&3))
	{
		// dword-at-a-time copy, hopping across the interleaved SIMD lanes
		unsigned int w32_cnt = len>>2;
		if (w32_cnt)
		{
			uint32_t *wpO;
			wpO = (uint32_t*)&IPB[GETPOS(bf_ptr, idx_mod)];
			len -= (w32_cnt<<2);
			bf_ptr += (w32_cnt<<2);
			do
			{
				*wpO = *((uint32_t*)cp);
				cp += 4;
				wpO += SIMD_COEF_32;
			}
			while (--w32_cnt);
		}
		if (!len) {
			if (bUpdate0x80)
				IPB[GETPOS(bf_ptr, idx_mod)] = 0x80;
			return;
		}
	}
#endif
	// bytewise tail copy; every 4th byte steps over the other lanes' slots
	cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
	while (len--)
	{
		*cpO++ = *cp++;
		if ( ((++bf_ptr)&3) == 0)
			cpO += ((SIMD_COEF_32-1)*4);
	}
	if (bUpdate0x80)
		*cpO = 0x80;
}
#endif  // #ifdef SIMD_COEF_32 from way above.

// Append Str/len to input buffer 1 of every key in this thread's range,
// honoring the per-thread unicode mode (0 = raw bytes, 1 = UTF-16LE, 2 = UTF-16BE).
inline static void __append_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len)
{
	unsigned int j;
	unsigned int til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		if (!utf16) {
			for (; j < til; ++j) {
				unsigned int idx = j/SIMD_COEF_32;
				unsigned int idx_mod = j&(SIMD_COEF_32-1);
				unsigned int bf_ptr = total_len[idx][idx_mod];
				total_len[idx][idx_mod] += len;
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
			}
		} else {
			if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
				// real charset conversion (or BE) needed: convert once, append to every lane
				UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16);
				if (outlen < 0)
					outlen = strlen16(utf16Str) * sizeof(UTF16);
				for (; j < til; ++j) {
					unsigned int idx = j/SIMD_COEF_32;
					unsigned int idx_mod = j&(SIMD_COEF_32-1);
					unsigned int bf_ptr = total_len[idx][idx_mod];
					total_len[idx][idx_mod] += outlen;
					// note we use the 'non' unicode variant, since we have already computed the unicode, and length properly
					__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				}
			} else {
				// plain 8-bit data: cheap byte->UTF-16LE expansion per lane
				for (; j < til; ++j) {
					unsigned int idx = j/SIMD_COEF_32;
					unsigned int idx_mod = j&(SIMD_COEF_32-1);
					unsigned int bf_ptr = total_len[idx][idx_mod];
					total_len[idx][idx_mod] += len << 1;
					__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1);
				}
			}
		}
		return;
	}
#endif
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
			int outlen;
			if (utf16 == 1)
				outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
			else
				outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16);
			if (outlen < 0)
				outlen = strlen16(utf16Str) * sizeof(UTF16);
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp;
				unsigned char *cpi = (unsigned char*)utf16Str;
				// only append when it fits without trashing the buffer
				if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (z = 0; z < outlen; ++z) {
						*cp++ = *cpi++;
					}
					total_len_X86[j] += outlen;
				}
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp;
				unsigned char *cpi = Str;
				if (total_len_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (z = 0; z < len; ++z) {
						// inline byte -> UTF-16LE expansion
						*cp++ = *cpi++;
						*cp++ = 0;
					}
					total_len_X86[j] += (len<<1);
				}
			}
		}
	} else {
		for (; j < til; ++j) {
#if MD5_X2
			if (j&1)
				memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), Str, len);
			else
#endif
			memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]),
Str, len); total_len_X86[j] += len; } } } inline static void __append2_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len) { unsigned int j; unsigned int til; int utf16 = md5_unicode_convert_get(tid); #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { if (!utf16) { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len2[idx][idx_mod]; total_len2[idx][idx_mod] += len; __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1); } } else { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) { UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16); else outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len2[idx][idx_mod]; total_len2[idx][idx_mod] += outlen; // note we use the 'non' unicode variant of __SSE_append_string_to_input(), since it's already unicode, and length properly __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1); } } else { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len2[idx][idx_mod]; total_len2[idx][idx_mod] += len << 1; __SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1); } } } return; } #endif if (utf16) { if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) { UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1]; int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16); 
else outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); for (; j < til; ++j) { unsigned int z; unsigned char *cp; unsigned char *cpi = (unsigned char*)utf16Str; if (total_len2_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (z = 0; z < outlen; ++z) { *cp++ = *cpi++; } total_len2_X86[j] += outlen; } } } else { for (; j < til; ++j) { unsigned int z; unsigned char *cp; unsigned char *cpi = Str; if (total_len2_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (z = 0; z < len; ++z) { *cp++ = *cpi++; *cp++ = 0; } total_len2_X86[j] += (len<<1); } } } } else { for (; j < til; ++j) { #if MD5_X2 if (j&1) memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), Str, len); else #endif memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), Str, len); total_len2_X86[j] += len; } } } void DynamicFunc__setmode_unicodeBE(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead. { md5_unicode_convert_set(2,tid); } void DynamicFunc__setmode_unicode(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead. { md5_unicode_convert_set(1,tid); } void DynamicFunc__setmode_normal (DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead. 
{
	md5_unicode_convert_set(0,tid);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Clears the input variable, and input 'lengths'
 *************************************************************/
void DynamicFunc__clean_input(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input();
#else
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		// zero the whole SIMD blocks that cover this thread's [first,last) range
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y) {
			memset(input_buf[x].c, 0, sizeof(input_buf[0]));
			memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
			++x;
		}
		return;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
	}
#endif
}

// Same as DynamicFunc__clean_input(), but for input buffer 2.
void DynamicFunc__clean_input2(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2();
#else
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y) {
			memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
			memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
			++x;
		}
		return;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
	}
#endif
}

// 'full' variant: clears both the SIMD buffers AND the flat x86 buffers,
// regardless of dynamic_use_sse.
void DynamicFunc__clean_input_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input_full();
#else
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int x = first / SIMD_COEF_32;
	unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
	while (x < y) {
		memset(input_buf[x].c, 0, sizeof(input_buf[0]));
		memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
		++x;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
	}
#endif
}

// 'full' variant for input buffer 2: clears SIMD and flat buffers.
void DynamicFunc__clean_input2_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2_full();
#else
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int x = first / SIMD_COEF_32;
	unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
	while (x < y) {
		memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
		memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
		++x;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
	}
#endif
}

// 'kwik' variant: only resets the lengths (buffer bytes are left in place,
// except on big-endian where a small tail is scrubbed).
void DynamicFunc__clean_input_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input_kwik();
#else
#ifdef SIMD_COEF_32
	unsigned int i;
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y)
			memset(total_len[x++], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
		return;
	}
#else
	unsigned int i;
#endif
	for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, total_len_X86[i]+5);
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, total_len_X86[i]+5);
#endif
		total_len_X86[i] = 0;
	}
#endif
}

// 'kwik' variant for input buffer 2.
void DynamicFunc__clean_input2_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2_kwik();
#else
#ifdef SIMD_COEF_32
	unsigned int i;
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y)
			memset(total_len2[x++], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
		return;
	}
#else
	unsigned int i;
#endif
	for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, total_len2_X86[i]+5);
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, total_len2_X86[i]+5);
#endif
		total_len2_X86[i] = 0;
	}
#endif
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Appends all keys to the end of the input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_keys(DYNA_OMP_PARAMS)
{
	unsigned int j;
	unsigned int til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len[idx][idx_mod];
			if (utf16) {
				if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
					UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
					int outlen;
					int maxlen=27;
					if (curdat.pSetup->MaxInputLen < maxlen)
						maxlen = curdat.pSetup->MaxInputLen;
					if (utf16 == 1)
						outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					else
						outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					if (outlen <= 0) {
						// conversion truncated/failed: adjust the stored key length to match
						saved_key_len[j] = -outlen / sizeof(UTF16);
						if (outlen < 0)
							outlen = strlen16(utf16Str) * sizeof(UTF16);
					}
					total_len[idx][idx_mod] += outlen;
					__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				} else {
					total_len[idx][idx_mod] += (saved_key_len[j] << 1);
					__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
				}
			} else {
				total_len[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi;
				UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				if (outlen <= 0) {
					// conversion truncated/failed: adjust the stored key length to match
					saved_key_len[j] = -outlen / sizeof(UTF16);
					if (outlen < 0)
						outlen = strlen16(utf16Str) * sizeof(UTF16);
				}
				// only copy data if it will NOT trash the buffer
				if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
						*cp++ = *cpi++;
					total_len_X86[j] += outlen;
				}
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
				if (total_len_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (z = 0; z < saved_key_len[j]; ++z) {
						// inline byte -> UTF-16LE expansion
						*cp++ = *cpi++;
						*cp++ = 0;
					}
					total_len_X86[j] += (saved_key_len[j]<<1);
				}
			}
		}
	} else {
		for (; j < til; ++j) {
#if MD5_X2
			if (j&1)
				memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
			else
#endif
			memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
			total_len_X86[j] += saved_key_len[j];
		}
	}
}

// DynamicFunc__append_keys_pad16
//   append the array of keys to the array input1[], padding with nulls to 16 bytes, if input shorter.
//   Needed for net-md5 and net-sha1 formats.
void DynamicFunc__append_keys_pad16(DYNA_OMP_PARAMS)
{
	unsigned int j;
	unsigned int til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len[idx][idx_mod];
			saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
			if (saved_key_len[j] < 16) {
				// short key: copy via a zero-padded scratch buffer, append exactly 16 bytes
				char buf[24];
				strncpy(buf, saved_key[j], 18);
				total_len[idx][idx_mod] += 16;
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,16,bf_ptr,1);
			} else {
				total_len[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
		if (j&1)
			strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 17);
		else
#endif
		strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 17);
		total_len_X86[j] += 16;
	}
}

// Like append_keys_pad16, but pads with nulls to 20 bytes (net-sha1 style).
void DynamicFunc__append_keys_pad20(DYNA_OMP_PARAMS)
{
	unsigned int j;
	unsigned int til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len[idx][idx_mod];
			saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
			if (saved_key_len[j] < 20) {
				// short key: copy via a zero-padded scratch buffer, append exactly 20 bytes
				char buf[28];
				strncpy(buf, saved_key[j], 22);
				total_len[idx][idx_mod] += 20;
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,20,bf_ptr,1);
			} else {
				total_len[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works'
#if MD5_X2
		if (j&1)
			strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 21);
		else
#endif
		strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 21);
		total_len_X86[j] += 20;
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Appends all keys to the end of the 2nd input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_keys2(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len2[idx][idx_mod];
			if (utf16) {
				if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
					UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
					int outlen;
					int maxlen=27;
					if (curdat.pSetup->MaxInputLen < maxlen)
						maxlen = curdat.pSetup->MaxInputLen;
					if (utf16 == 1)
						outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					else
						outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					if (outlen <= 0) {
						// conversion truncated/failed: adjust the stored key length to match
						saved_key_len[j] = -outlen / sizeof(UTF16);
						if (outlen < 0)
							outlen = strlen16(utf16Str) * sizeof(UTF16);
					}
					total_len2[idx][idx_mod] += outlen;
					__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				} else {
					total_len2[idx][idx_mod] += (saved_key_len[j] << 1);
					__SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
				}
			} else {
				total_len2[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
} #endif if (utf16) { if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) { for (; j < til; ++j) { unsigned int z; unsigned char *cp, *cpi; UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1]; int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16); else outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16); if (outlen <= 0) { saved_key_len[j] = -outlen / sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); } // only copy data if it will NOT trash the buffer if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z) *cp++ = *cpi++; total_len2_X86[j] += outlen; } } } else { for (; j < til; ++j) { unsigned int z; unsigned char *cp, *cpi = (unsigned char*)saved_key[j]; if (total_len2_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (z = 0; z < saved_key_len[j]; ++z) { *cp++ = *cpi++; *cp++ = 0; } total_len2_X86[j] += (saved_key_len[j]<<1); } } } } else { for (; j < til; ++j) { #if MD5_X2 if (j&1) memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), saved_key[j], saved_key_len[j]); else #endif memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), saved_key[j], saved_key_len[j]); total_len2_X86[j] += saved_key_len[j]; } } } void DynamicFunc__set_input_len_16(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { unsigned int k; j /= 
SIMD_COEF_32; til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; for (; j < til; ++j) { // If length is < 16, then remove existing end of buffer marker, and then set // one at offset 16 for (k = 0; k < SIMD_COEF_32; ++k) { unsigned int this_item_len = total_len[j][k]; if (this_item_len < 16) input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00; input_buf[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80; total_len[j][k] = 16; } } return; } #endif for (; j < til; ++j) { // TODO: this code MAY need buffer cleaned up if we are using md5_go code!!! #if MD5_X2 if (j&1) { while (total_len_X86[j] < 16) input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0; } else #endif {while (total_len_X86[j] < 16) input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;} total_len_X86[j] = 16; } } void DynamicFunc__set_input2_len_16(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { unsigned int k; j /= SIMD_COEF_32; til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; for (; j < til; ++j) { // If length is < 16, then remove existing end of buffer marker, and then set // one at offset 16 for (k = 0; k < SIMD_COEF_32; ++k) { unsigned int this_item_len = total_len2[j][k]; if (this_item_len < 16) input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00; input_buf2[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80; total_len2[j][k] = 16; } } return; } #endif for (; j < til; ++j) { // TODO: this code MAY need buffer cleaned up if we are using md5_go code!!! 
#if MD5_X2
		if (j&1) {
			while (total_len2_X86[j] < 16)
				input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len2_X86[j] < 16)
			input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
		total_len2_X86[j] = 16;
	}
}

// Force input buffer 1 of each key to length 20 (null-padding short inputs).
void DynamicFunc__set_input_len_20(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			// If length is < 20, then remove existing end of buffer marker, and then set
			// one at offset 20
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len[j][k];
				if (this_item_len < 20)
					input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 20;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
#if MD5_X2
		if (j&1) {
			while (total_len_X86[j] < 20)
				input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len_X86[j] < 20)
			input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
		total_len_X86[j] = 20;
	}
}

// Force input buffer 2 of each key to length 20 (null-padding short inputs).
void DynamicFunc__set_input2_len_20(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			// If length is < 20, then remove existing end of buffer marker, and then set
			// one at offset 20
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len2[j][k];
				if (this_item_len < 20)
					input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf2[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 20;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
#if MD5_X2
		if (j&1) {
			while (total_len2_X86[j] < 20)
				input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
		}
		else
#endif
		{while
		(total_len2_X86[j] < 20)
			input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
		total_len2_X86[j] = 20;
	}
}

// Set the recorded length of input buffer 1 to 32 (flat/x86 mode only).
void DynamicFunc__set_input_len_32(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 32;
}

// Set length of input buffer 1 to 32 and (big-endian builds) scrub the bytes
// just past the 32-byte point; SSE path also writes the 0x80 marker.
void DynamicFunc__set_input_len_32_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 32;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		total_len_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			//MD5_swap(input_buf_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
			memset(&(input_buf_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
		}
		else
#endif
		{
			//MD5_swap(input_buf_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
			memset(&(input_buf_X86[j>>MD5_X2].x1.B[32]), 0, 24);
		}
#endif
	}
}

// Set the recorded length of input buffer 2 to 32 (flat/x86 mode only).
void DynamicFunc__set_input2_len_32(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 32;
}

// Buffer-2 counterpart of set_input_len_32_cleartop.
void DynamicFunc__set_input2_len_32_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j)
		{
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf2[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 32;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j)
	{
		total_len2_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			//MD5_swap(input_buf2_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
			memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
		}
		else
/* (continuation: big-endian else-branch of set_input2_len_32_cleartop) */
#endif
		{
			//MD5_swap(input_buf2_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
			memset(&(input_buf2_X86[j>>MD5_X2].x1.B[32]), 0, 24);
		}
#endif
	}
}

/* Force the logical length of every 1st input buffer (x86 layout) to 40. */
void DynamicFunc__set_input_len_40(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 40;
}

/* Force the logical length of every 2nd input buffer (x86 layout) to 40. */
void DynamicFunc__set_input2_len_40(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 40;
}

/* Set 2nd-input length to 40; in SIMD mode also store the 0x80 padding byte
 * at offset 40 of each lane; on big-endian x86-layout builds clear the
 * 16 bytes after offset 40. */
void DynamicFunc__set_input2_len_40_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j) {
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf2[j].c[GETPOS(40, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 40;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		total_len2_X86[j] = 40;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[40]), 0, 16);
		} else
#endif
		{
			memset(&(input_buf2_X86[j>>MD5_X2].x1.B[40]), 0, 16);
		}
#endif
	}
}

/* Force 1st-input length to 64.  Not supported in SIMD mode (a 64-byte
 * payload leaves no room for MD5 padding in a single SIMD block). */
void DynamicFunc__set_input_len_64(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_64 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 64;
}

/* Force 2nd-input length to 64.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_64(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_64 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 64;
}

/* (return type of the next function; its name begins on the next source line) */
void
/* Zero the 1st input buffer from its current end up to the first embedded
 * NUL, then force the length to 100.  Not supported in SIMD mode.
 * (The `void` return type of this function sits at the end of the previous
 * source line.) */
DynamicFunc__set_input_len_100(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_100 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j) {
		unsigned char *cp;
#if MD5_X2
		if (j&1)
			cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
		else
#endif
			cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
		/* wipe the remaining non-NUL bytes past the current length */
		while (*cp)
			*cp++ = 0;
		total_len_X86[j] = 100;
	}
}

/* Force 1st-input length to 24.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_24(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_24 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 24;
}

/* Force 1st-input length to 28.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_28(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_28 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 28;
}

/* Force 1st-input length to 48.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_48(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_48 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 48;
}

/* Force 1st-input length to 56.  Not supported in SIMD mode.
 * NOTE(review): the loop body continues on the next source line. */
void DynamicFunc__set_input_len_56(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_56 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		/* (continuation: loop body of DynamicFunc__set_input_len_56) */
		total_len_X86[j] = 56;
}

/* Force 1st-input length to 80.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_80(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_80 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 80;
}

/* Force 1st-input length to 96.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_96(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_96 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 96;
}

/* Force 1st-input length to 112.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_112(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_112 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 112;
}

/* Force 1st-input length to 128.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_128(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_128 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 128;
}

/* Force 1st-input length to 160.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_160(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_160 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 160;
}

/* Force 1st-input length to 192.  Not supported in SIMD mode.
 * NOTE(review): the assignment to `til` completes on the next source line. */
void DynamicFunc__set_input_len_192(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til =
	/* (continuation: non-OpenMP bound for DynamicFunc__set_input_len_192) */
	m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_192 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 192;
}

/* Force 1st-input length to 256.  Not supported in SIMD mode. */
void DynamicFunc__set_input_len_256(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_256 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 256;
}

/* Force 2nd-input length to 24.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_24(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_24 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 24;
}

/* Force 2nd-input length to 28.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_28(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_28 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 28;
}

/* Force 2nd-input length to 48.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_48(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_48 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 48;
}

/* Force 2nd-input length to 56.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_56(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_56 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 56;
}

/* Force 2nd-input length to 80.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_80(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_80 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 80;
}

/* Force 2nd-input length to 96.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_96(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_96 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 96;
}

/* Force 2nd-input length to 112.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_112(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_112 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 112;
}

/* Force 2nd-input length to 128.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_128(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_128 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 128;
}

/* Force 2nd-input length to 160.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_160(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_160 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 160;
}

/* (return type of the next function; its name begins on the next source line) */
void
/* Force 2nd-input length to 192.  Not supported in SIMD mode.
 * (The `void` return type sits at the end of the previous source line.) */
DynamicFunc__set_input2_len_192(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_192 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 192;
}

/* Force 2nd-input length to 256.  Not supported in SIMD mode. */
void DynamicFunc__set_input2_len_256(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_256 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 256;
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the salt to the end of the input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_salt(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the salt to the end of the 2nd input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_salt2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}

/* Concatenate input2 onto the end of input1 and add input2's length to
 * input1's length.  SIMD path copies byte-by-byte through GETPOS and
 * re-writes the 0x80 padding byte after the new end.
 * NOTE(review): the SIMD inner loop completes on the next source line. */
void DynamicFunc__append_input_from_input2(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len[i][j];
				unsigned int len1 = total_len2[i][j];
				for (k = 0; k < len1; ++k)
					input_buf[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
				input_buf[i].c[GETPOS((len1+start_len), j)] =
					/* (continuation: write the 0x80 padding byte after the
					 * appended data and bump the lane's length) */
					0x80;
				total_len[i][j] += len1;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
		else
#endif
			memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
		total_len_X86[i] += total_len2_X86[i];
	}
}

/* Concatenate input1 onto the end of input2 (mirror of the function above). */
void DynamicFunc__append_input2_from_input(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len2[i][j];
				unsigned int len1 = total_len[i][j];
				for (k = 0; k < len1; ++k)
					input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
				input_buf2[i].c[GETPOS((len1+start_len), j)] = 0x80;
				total_len2[i][j] += len1;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
		else
#endif
			memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
		total_len2_X86[i] += total_len_X86[i];
	}
}

/* Append input1 to itself (doubling its contents and length).
 * NOTE(review): the x86 branch completes on the next source line. */
void DynamicFunc__append_input_from_input(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len[i][j];
				for (k = 0; k < start_len; ++k)
					input_buf[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
				input_buf[i].c[GETPOS((start_len+start_len), j)] = 0x80;
				total_len[i][j] += start_len;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			/* (continuation: self-append for the odd X2 half) */
			memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
		else
#endif
			memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
		/* doubling the buffer doubles the length */
		total_len_X86[i] <<= 1;
	}
}

/* Append input2 to itself (doubling its contents and length). */
void DynamicFunc__append_input2_from_input2(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len2[i][j];
				for (k = 0; k < start_len; ++k)
					input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
				input_buf2[i].c[GETPOS((start_len+start_len), j)] = 0x80;
				total_len2[i][j] += start_len;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
		else
#endif
			memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
		total_len2_X86[i] <<= 1;
	}
}

#ifdef SIMD_PARA_MD5
/* Store each lane's bit length (len << 3) into word 14 of the MD5 SIMD
 * block; side 0 = input_buf/total_len, side 1 = input_buf2/total_len2. */
static void SSE_Intrinsics_LoadLens_md5(int side, int i)
{
	uint32_t *p;
	unsigned int j, k;
	if (side == 0) {
		for (j = 0; j < SIMD_PARA_MD5; j++) {
			p = input_buf[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
		}
	}
	else {
		for (j = 0; j < SIMD_PARA_MD5; j++) {
			p = input_buf2[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
		}
	}
}
#endif

#ifdef SIMD_PARA_MD4
/* MD4 variant of the length loader above.
 * NOTE(review): the else-branch inner loop completes on the next source line. */
static void SSE_Intrinsics_LoadLens_md4(int side, int i)
{
	uint32_t *p;
	unsigned int j, k;
	if (side == 0) {
		for (j = 0; j < SIMD_PARA_MD4; j++) {
			p = input_buf[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
		}
	}
	else {
		for (j = 0; j < SIMD_PARA_MD4; j++) {
			p = input_buf2[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				/* (continuation: store bit lengths for side 1 of the MD4
				 * length loader) */
				p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
		}
	}
}
#endif

/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the first input field. The data is
 * still in the binary encrypted format, in the crypt_key.
 * we do not yet convert to base-16. This is so we can output
 * as base-16, or later, if we add base-64, we can output to
 * that format instead.
 *************************************************************/
void DynamicFunc__crypt_md5(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		if (curdat.store_keys_in_input) {
			/* lengths already embedded in the SIMD blocks */
			for (; i < til; i += SIMD_PARA_MD5) {
				SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += SIMD_PARA_MD5) {
				SSE_Intrinsics_LoadLens_md5(0, i);
				SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}

/* MD4 of input1 into crypt_key (same structure as crypt_md5). */
void DynamicFunc__crypt_md4(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		if (curdat.store_keys_in_input) {
			for (; i < til; i += SIMD_PARA_MD4) {
				SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += SIMD_PARA_MD4) {
				SSE_Intrinsics_LoadLens_md4(0, i);
				SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}

/* PO-format crypt: builds "salt 'Y' key 0xf7 salt" in input1 and MD5s it.
 * The commented list below documents the primitive sequence it replaces. */
void DynamicFunc__POCrypt(DYNA_OMP_PARAMS)
{
	unsigned int i, j;
	unsigned int til, len;
	unsigned char *pBuf;
#if MD5_X2
	unsigned char *pBuf2;
	unsigned int lens[2];
#endif
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
	//DynamicFunc__clean_input_kwik();
	//DynamicFunc__append_salt,
	//DynamicFunc__append_input1_from_CONST1,
	//DynamicFunc__append_keys,
	//DynamicFunc__append_input1_from_CONST2,
	//DynamicFunc__append_salt,
	//DynamicFunc__crypt_md5,
	pBuf = input_buf_X86[i>>MD5_X2].x1.B;
#if MD5_X2
	pBuf2 = input_buf_X86[i>>MD5_X2].x2.B2;
	memset(pBuf2, 0, sizeof(input_buf_X86[i>>MD5_X2].x2.B2));
	memcpy(pBuf2, cursalt, 32);
	pBuf2[32] = 'Y';
#endif
	memset(pBuf, 0, sizeof(input_buf_X86[i>>MD5_X2].x1.b));
	memcpy(pBuf, cursalt, 32);
	pBuf[32] = 'Y';
	for (j = i; j < til; ++j) {
		len = saved_key_len[j];
		memcpy(&pBuf[33], saved_key[j], len);
		pBuf[33+len] = 0xf7;
		memcpy(&pBuf[34+len], cursalt, 32);
#if MD5_X2
		lens[0] = len+66; // len from the 'first'
		++j;
		if (j < m_count) {
			len = saved_key_len[j];
			memcpy(&pBuf2[33], saved_key[j], len);
			pBuf2[33+len] = 0xf7;
			memcpy(&pBuf2[34+len], cursalt, 32);
			lens[1] = len+66;
		} else {
			lens[1] = 0;
		}
		DoMD5(input_buf_X86[i>>MD5_X2], lens, crypt_key_X86[j>>MD5_X2]);
#else
		DoMD5(input_buf_X86[i>>MD5_X2], (len+66), crypt_key_X86[j]);
#endif
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the 2nd input field into crypt_keys2.
 *************************************************************/
void DynamicFunc__crypt2_md5(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; i += SIMD_PARA_MD5) {
			SSE_Intrinsics_LoadLens_md5(1, i);
			SIMDmd5body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len2_X86[i++];
		if (i < m_count)
			len[1] = total_len2_X86[i];
		else
			len[1] = 0;
#else
		unsigned int len = total_len2_X86[i];
#endif
		DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}

/* MD4 of input2 into crypt_key2. */
void DynamicFunc__crypt2_md4(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; i += SIMD_PARA_MD4) {
			SSE_Intrinsics_LoadLens_md4(1, i);
			SIMDmd4body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len2_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len2_X86[i];
#else
		unsigned int len = total_len2_X86[i];
#endif
		DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the 1st input field crypt_keys2.
 *************************************************************/
void DynamicFunc__crypt_md5_in1_to_out2(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		if (curdat.store_keys_in_input) {
			for (; i < til; i += SIMD_PARA_MD5) {
				SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += SIMD_PARA_MD5) {
				SSE_Intrinsics_LoadLens_md5(0, i);
				SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}

/* MD4 of input1 into crypt_key2.
 * NOTE(review): the scalar loop body continues on the next source line. */
void DynamicFunc__crypt_md4_in1_to_out2(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		if (curdat.store_keys_in_input) {
			for (; i < til; i += SIMD_PARA_MD4) {
				SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += SIMD_PARA_MD4) {
				SSE_Intrinsics_LoadLens_md4(0, i);
				SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
		/* (continuation: length gathering + MD4 for crypt_md4_in1_to_out2) */
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]);
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the 2nd input field into crypt_keys.
 *************************************************************/
void DynamicFunc__crypt_md5_in2_to_out1(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; i += SIMD_PARA_MD5) {
			SSE_Intrinsics_LoadLens_md5(1, i);
			SIMDmd5body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			//dump_stuff_mmx_msg("DynamicFunc__crypt_md5_in2_to_out1", input_buf2[i].c,64,m_count-1);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len2_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len2_X86[i];
#else
		unsigned int len = total_len2_X86[i];
#endif
		DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}

/* MD4 of input2 into crypt_key.
 * NOTE(review): the scalar loop body continues on the next source line. */
void DynamicFunc__crypt_md4_in2_to_out1(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; i += SIMD_PARA_MD4) {
			SSE_Intrinsics_LoadLens_md4(1, i);
			SIMDmd4body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
		/* (continuation: length gathering + MD4 for crypt_md4_in2_to_out1) */
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len2_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len2_X86[i];
#else
		unsigned int len = total_len2_X86[i];
#endif
		DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}

/* MD5 input1 and store the 16 raw digest bytes back INTO input1, setting
 * the length to 16 (0x10). */
void DynamicFunc__crypt_md5_to_input_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; i += SIMD_PARA_MD5) {
			unsigned int j, k;
			SSE_Intrinsics_LoadLens_md5(0, i);
			// NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
			// each, and we are doing 3 at a time, we can NOT directly write to the
			// input buff, but have to use the crypt_key buffer, and then memcpy when done.
			SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			for (j = 0; j < SIMD_PARA_MD5; ++j) {
				memset(input_buf[i+j].c, 0, sizeof(input_buf[0]));
				memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
				for (k = 0; k < SIMD_COEF_32; k++)
					total_len[i+j][k] = 16;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i];
		total_len_X86[i++] = 0x10;
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
		total_len_X86[i] = 0x10;
	}
}

/* Same as above but the stored lengths are NOT updated (in SIMD mode the
 * lane lengths are still loaded into the blocks before hashing). */
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; i += SIMD_PARA_MD5) {
			unsigned int j;
			SSE_Intrinsics_LoadLens_md5(0, i);
			// NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
			// each, and we are doing 3 at a time, we can NOT directly write to the
			// input buff, but have to use the crypt_key buffer, and then memcpy when done.
			SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			for (j = 0; j < SIMD_PARA_MD5; ++j)
				memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
	}
}

/* Digest-in-place variant that neither loads nor updates lengths.
 * NOTE(review): the SIMD loop body continues on the next source line. */
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; i += SIMD_PARA_MD5) {
			unsigned int j;
			// NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
			// each, and we are doing 3 at a time, we can NOT directly write to the
			// input buff, but have to use the crypt_key buffer, and then memcpy when done.
			/* (continuation: SIMD body of Overwrite_NoLen) */
			SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			for (j = 0; j < SIMD_PARA_MD5; ++j)
				memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		// we call DoMD5o so as to 'not' change then length (it was already set)
		DoMD5o(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
	}
}

/* Overwrite the START of input1 with the salt (optionally converted to
 * UTF-16 LE/BE) without touching the stored lengths.
 * NOTE(review): the non-SIMD UTF-16 branch continues on the next source
 * line. */
void DynamicFunc__overwrite_salt_to_input1_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	j = first;
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		if (utf16) {
			if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
				UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
				if (outlen < 0)
					outlen = strlen16(utf16Str) * sizeof(UTF16);
				for (; j < til; ++j) {
					__SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
				}
			} else {
				/* plain 8-bit data widened to UTF-16 on the fly */
				for (; j < til; ++j)
					__SSE_append_string_to_input_unicode(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
			}
			return;
		}
		for (; j < til; ++j)
			__SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
		return;
	}
#endif
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
			int outlen;
			if (utf16 == 1)
				outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
			else
				/* (continuation: big-endian conversion branch of
				 * overwrite_salt_to_input1_no_size_fix) */
				outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
			if (outlen < 0)
				outlen = strlen16(utf16Str) * sizeof(UTF16);
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
				if (j&1)
					cp = input_buf_X86[j>>MD5_X2].x2.B2;
				else
#endif
					cp = input_buf_X86[j>>MD5_X2].x1.B;
				for (z = 0; z < outlen; ++z)
					*cp++ = *cpi++;
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
				if (j&1)
					cp = input_buf_X86[j>>MD5_X2].x2.B2;
				else
#endif
					cp = input_buf_X86[j>>MD5_X2].x1.B;
				/* widen each byte to a little-endian UTF-16 code unit */
				for (z = 0; z < saltlen; ++z) {
					*cp++ = *cpi++;
					*cp++ = 0;
				}
			}
		}
		return;
	}
	for (; j < til; ++j) {
#if MD5_X2
		if (j&1)
			memcpy(input_buf_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
		else
#endif
			memcpy(input_buf_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
	}
}

/* Overwrite the START of input2 with the salt (mirror of the input1
 * variant above).
 * NOTE(review): the SIMD non-UTF-16 loop body continues on the next
 * source line. */
void DynamicFunc__overwrite_salt_to_input2_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	j = first;
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		if (utf16) {
			if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
				UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
				if (outlen < 0)
					outlen = strlen16(utf16Str) * sizeof(UTF16);
				for (; j < til; ++j) {
					__SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
				}
			} else {
				for (; j < til; ++j)
					__SSE_append_string_to_input_unicode(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
			}
			return;
		}
		for (; j < til; ++j)
			/* (continuation: raw-salt SIMD loop of
			 * overwrite_salt_to_input2_no_size_fix) */
			__SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
		return;
	}
#endif
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
			int outlen;
			if (utf16 == 1)
				outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
			else
				outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
			if (outlen < 0)
				outlen = strlen16(utf16Str) * sizeof(UTF16);
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
				if (j&1)
					cp = input_buf2_X86[j>>MD5_X2].x2.B2;
				else
#endif
					cp = input_buf2_X86[j>>MD5_X2].x1.B;
				for (z = 0; z < outlen; ++z)
					*cp++ = *cpi++;
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
				if (j&1)
					cp = input_buf2_X86[j>>MD5_X2].x2.B2;
				else
#endif
					cp = input_buf2_X86[j>>MD5_X2].x1.B;
				for (z = 0; z < saltlen; ++z) {
					*cp++ = *cpi++;
					*cp++ = 0;
				}
			}
		}
		return;
	}
	for (; j < til; ++j) {
#if MD5_X2
		if (j&1)
			memcpy(input_buf2_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
		else
#endif
			memcpy(input_buf2_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input1 from the output2 data using base-16
 *************************************************************/
/* NOTE(review): the x86 loop of this function continues on the next
 * source line. */
void DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	j = first;
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; j < til; ++j) {
			idx = ( ((unsigned int)j)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, j&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		unsigned char *cpo,
			/* (continuation: hexify crypt_key2 digest over the start of
			 * input1, 2 hex chars per digest byte) */
			*cpi;
		unsigned int i;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
			{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi) {
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input1 from the output1 data using base-16
 *************************************************************/
void DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	j = first;
	til = last;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; j < til; ++j) {
			idx = ( ((unsigned int)j)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		unsigned char *cpo, *cpi;
		unsigned int i;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1)
			{cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
			{cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi) {
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys (the encrypted
 * 'first' key variable), and use a base-16 text formatting, and
 * append this to the first input buffer (adjusting the lengths)
 *************************************************************/
/* NOTE(review): the function body begins on the next source line. */
void DynamicFunc__append_from_last_output_as_base16(DYNA_OMP_PARAMS)
{
unsigned int j, til; #ifdef _OPENMP j = first; til = last; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { unsigned int idx; for (; j < til; ++j) { unsigned int ip; idx = ( ((unsigned int)j)/SIMD_COEF_32); // This is the 'actual' work. ip = total_len[idx][j & (SIMD_COEF_32 - 1)]; total_len[idx][j & (SIMD_COEF_32 - 1)] += 32; if (!ip) __SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1)); else if (ip&1) { // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time). unsigned int k; for (k = 0; k < 16; ++k) { unsigned char v = crypt_key[idx].c[GETPOS(k, j&(SIMD_COEF_32-1))]; input_buf[idx].c[GETPOS(ip+(k<<1), j&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4]; input_buf[idx].c[GETPOS(ip+(k<<1)+1, j&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF]; } input_buf[idx].c[GETPOS(ip+32, j&(SIMD_COEF_32-1))] = 0x80; } else if ((ip&3)==0) __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1)); else __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1)); } return; } #endif for (; j < til; ++j) { unsigned char *cp, *cpi; unsigned int i; #if MD5_X2 if (j&1) {cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x2.B2; } else #endif {cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x1.B; } for (i = 0; i < 16; ++i) { #if ARCH_ALLOWS_UNALIGNED *((unsigned short*)cp) = itoa16_w2[*cpi++]; cp += 2; #else unsigned char b = *cpi++; *cp++ = dynamic_itoa16[b>>4]; *cp++ = dynamic_itoa16[b&0xF]; #endif } *cp = 0; total_len_X86[j] += 32; } } /************************************************************** * DYNAMIC primitive helper function * This will take the data stored in the crypt_keys2 (the encrypted * 'second' key variable), and base-16 appends to the 2nd input 
*************************************************************/ void DynamicFunc__append_from_last_output2_as_base16(DYNA_OMP_PARAMS) { unsigned int i, til; #ifdef _OPENMP i = first; til = last; #else i = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { unsigned int idx; for (; i < til; ++i) { unsigned int ip, j; idx = ( ((unsigned int)i)/SIMD_COEF_32); // This is the 'actual' work. ip = total_len2[idx][i&(SIMD_COEF_32-1)]; total_len2[idx][i&(SIMD_COEF_32-1)] += 32; if (!ip) __SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1)); else if (ip&1) { // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time). for (j = 0; j < 16; ++j) { unsigned char v = crypt_key2[idx].c[GETPOS(j, i&(SIMD_COEF_32-1))]; input_buf2[idx].c[GETPOS(ip+(j<<1), i&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4]; input_buf2[idx].c[GETPOS(ip+(j<<1)+1, i&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF]; } input_buf2[idx].c[GETPOS(ip+32, i&(SIMD_COEF_32-1))] = 0x80; } else if ((ip&3)==0) __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1)); else __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1)); } return; } #endif for (; i < til; ++i) { unsigned int j; unsigned char *cp, *cpi; #if MD5_X2 if (i&1) {cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; } else #endif {cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; } for (j = 0; j < 16; ++j) { #if ARCH_ALLOWS_UNALIGNED *((unsigned short*)cp) = itoa16_w2[*cpi++]; cp += 2; #else unsigned char b = *cpi++; *cp++ = dynamic_itoa16[b>>4]; *cp++ = dynamic_itoa16[b&0xF]; #endif } *cp = 0; total_len2_X86[i] += 32; } } /************************************************************** * DYNAMIC primitive helper function * overwrites start of input2 from the output1 data using 
base-16
 * an optimization, if the same thing is done over and over
 * again, such as md5(md5(md5(md5($p)))) There, we would only
 * call the copy and set length once, then simply call copy.
 *************************************************************/
void DynamicFunc__overwrite_from_last_output_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	/* Hex-encodes output1 (crypt_key) over the start of input2.
	 * Lengths are intentionally left untouched ("no_size_fix"). */
	unsigned int i, til, j;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; i < til; ++i) {
			idx = ( ((unsigned int)i)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, i&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	j = i;
	for (; j < til; ++j) {
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1) {cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
		{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		/* note: 'i' is reused as the nibble counter here (flat path only) */
		for (i = 0; i < 16; ++i, ++cpi) {
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}

void DynamicFunc__overwrite_from_last_output2_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	/* Same as above but sources from output2 (crypt_key2). */
	unsigned int i, til, j;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; i < til; ++i) {
			idx = ( ((unsigned int)i)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	j = i;
	for (; j < til; ++j) {
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1) {cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf2_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
		{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf2_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi) {
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input2 from the output2 data using base-16
 *************************************************************/
void DynamicFunc__overwrite_from_last_output2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
	unsigned int i, til, j;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx;
		for (; i < til; ++i) {
			idx = ( ((unsigned int)i)/SIMD_COEF_32);
			__SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	j=i;
	for (; j < til; ++j) {
		unsigned char *cpo, *cpi;
		/* MD5_word *w; */
#if MD5_X2
		if (j&1) {cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
		else
#endif
		{cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
		for (i = 0; i < 16; ++i, ++cpi) {
			*cpo++ = dynamic_itoa16[*cpi>>4];
			*cpo++ = dynamic_itoa16[*cpi&0xF];
		}
		//MD5_swap(w,w,4);
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys1 (the encrypted
 * 'first' key variable), and base-16 appends to the 2nd input
 *************************************************************/
void DynamicFunc__append_from_last_output_to_input2_as_base16(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index) {
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len2[idx][index&(SIMD_COEF_32-1)];
			total_len2[idx][index&(SIMD_COEF_32-1)] += 32;
			if (!ip)
				__SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
			else if (ip&1) {
				// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
				for (i = 0; i < 16; ++i) {
					unsigned char v = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
					input_buf2[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
					input_buf2[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
				}
				input_buf2[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else if ((ip&3)==0)
				__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
			else
				__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1) {cpi = crypt_key_X86[i>>MD5_X2].x2.B2; cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); }
		else
#endif
		{cpi = crypt_key_X86[i>>MD5_X2].x1.B; cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]);}
		for (j = 0; j < 16; ++j) {
#if ARCH_ALLOWS_UNALIGNED
			*((unsigned short*)cp) = itoa16_w2[*cpi++];
			cp += 2;
#else
			unsigned char b = *cpi++;
			*cp++ = dynamic_itoa16[b>>4];
			*cp++ = dynamic_itoa16[b&0xF];
#endif
		}
		*cp = 0;
		total_len2_X86[i] += 32;
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys2 (the encrypted
 * 'second' key variable), and base-16 appends to the 1st input
 *************************************************************/
void DynamicFunc__append_from_last_output2_to_input1_as_base16(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index) {
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len[idx][index&(SIMD_COEF_32-1)];
			total_len[idx][index&(SIMD_COEF_32-1)] += 32;
			if (!ip)
				__SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
			else if (ip&1) {
				// Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
				for (i = 0; i < 16; ++i) {
					unsigned char v = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
					input_buf[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
					input_buf[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
				}
				input_buf[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
			}
			else if ((ip&3)==0)
				__SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
			else
				__SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1) {cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
		else
#endif
		{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j) {
#if ARCH_ALLOWS_UNALIGNED
			*((unsigned short*)cp) = itoa16_w2[*cpi++];
			cp += 2;
#else
			unsigned char b = *cpi++;
			*cp++ = dynamic_itoa16[b>>4];
			*cp++ = dynamic_itoa16[b&0xF];
#endif
		}
		*cp = 0;
		total_len_X86[i] += 32;
	}
}

/* Appends the 16 raw (binary, not hex) bytes of output2 to input1. */
void DynamicFunc__append_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index) {
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len[idx][index&(SIMD_COEF_32-1)];
			if (!ip) {
				/* Empty input: copy 4 interleaved 32-bit words straight
				 * across, one per SIMD lane stride. */
				uint32_t *po = input_buf[idx].w;
				uint32_t *pi = crypt_key2[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++) {
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			} else {
				/* Non-empty: byte-wise copy at arbitrary offset ip. */
				for (i = 0; i < 16; ++i)
					input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1) {cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
		else
#endif
		{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len_X86[i] += 16;
	}
}

/* Appends the 16 raw bytes of output2 to input2. */
void DynamicFunc__append2_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index=i, idx;
		for (; index < til; ++index) {
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len2[idx][index&(SIMD_COEF_32-1)];
			if (!ip) {
				uint32_t *po = input_buf2[idx].w;
				uint32_t *pi = crypt_key2[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++) {
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			} else {
				for (i = 0; i < 16; ++i)
					input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1) {cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
		else
#endif
		{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len2_X86[i] += 16;
	}
}

/* Appends the 16 raw bytes of output1 to input1. */
void DynamicFunc__append_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index, idx;
		for (index = i; index < til; ++index) {
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len[idx][index&(SIMD_COEF_32-1)];
			if (!ip) {
				uint32_t *po = input_buf[idx].w;
				uint32_t *pi = crypt_key[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++) {
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			} else {
				for (i = 0; i < 16; ++i)
					input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1) {cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
		else
#endif
		{cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len_X86[i] += 16;
	}
}

/* Appends the 16 raw bytes of output1 to input2. */
void DynamicFunc__append2_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	i = first;
	til = last;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int index, idx;
		for (index = i; index < til; ++index) {
			unsigned int ip;
			idx = ( ((unsigned int)index)/SIMD_COEF_32);
			// This is the 'actual' work.
			ip = total_len2[idx][index&(SIMD_COEF_32-1)];
			if (!ip) {
				uint32_t *po = input_buf2[idx].w;
				uint32_t *pi = crypt_key[idx].w;
				po += (index&(SIMD_COEF_32-1));
				pi += (index&(SIMD_COEF_32-1));
				for (i = 0; i < 4; i++) {
					*po = *pi;
					po += SIMD_COEF_32;
					pi += SIMD_COEF_32;
				}
				input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
			} else {
				for (i = 0; i < 16; ++i)
					input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
				input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
			}
			total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		unsigned int j;
		unsigned char *cp, *cpi;
#if MD5_X2
		if (i&1) {cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
		else
#endif
		{cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
		for (j = 0; j < 16; ++j)
			*cp++ = *cpi++;
		*cp = 0;
		total_len2_X86[i] += 16;
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Append salt #2 into input 1
 *************************************************************/
void DynamicFunc__append_2nd_salt(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Append salt #2 into input 2
 *************************************************************/
void DynamicFunc__append_2nd_salt2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Append UserID into input 1
 *************************************************************/
void DynamicFunc__append_userid(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm username, usernamelen);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * 
Append UserID into input 2
 *************************************************************/
void DynamicFunc__append_userid2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm username, usernamelen);
}

/* Thin wrappers: append script constant #N to input 1. */
void DynamicFunc__append_input1_from_CONST1(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]); }
void DynamicFunc__append_input1_from_CONST2(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]); }
void DynamicFunc__append_input1_from_CONST3(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]); }
void DynamicFunc__append_input1_from_CONST4(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]); }
void DynamicFunc__append_input1_from_CONST5(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]); }
void DynamicFunc__append_input1_from_CONST6(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]); }
void DynamicFunc__append_input1_from_CONST7(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]); }
void DynamicFunc__append_input1_from_CONST8(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]); }

/* Thin wrappers: append script constant #N to input 2. */
void DynamicFunc__append_input2_from_CONST1(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]); }
void DynamicFunc__append_input2_from_CONST2(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]); }
void DynamicFunc__append_input2_from_CONST3(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]); }
void DynamicFunc__append_input2_from_CONST4(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]); }
void DynamicFunc__append_input2_from_CONST5(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]); }
void DynamicFunc__append_input2_from_CONST6(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]); }
void DynamicFunc__append_input2_from_CONST7(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]); }
void DynamicFunc__append_input2_from_CONST8(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]); }

/* Thin wrappers: append ciphertext field N to input 1 / input 2. */
void DynamicFunc__append_fld0(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]); }
void DynamicFunc__append_fld1(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]); }
void DynamicFunc__append_fld2(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]); }
void DynamicFunc__append_fld3(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]); }
void DynamicFunc__append_fld4(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]); }
void DynamicFunc__append_fld5(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]); }
void DynamicFunc__append_fld6(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]); }
void DynamicFunc__append_fld7(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]); }
void DynamicFunc__append_fld8(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]); }
void DynamicFunc__append_fld9(DYNA_OMP_PARAMS) { __append_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]); }
void DynamicFunc__append2_fld0(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]); }
void DynamicFunc__append2_fld1(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]); }
void DynamicFunc__append2_fld2(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]); }
void DynamicFunc__append2_fld3(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]); }
void
DynamicFunc__append2_fld4(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]); } void DynamicFunc__append2_fld5(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]); } void DynamicFunc__append2_fld6(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]); } void DynamicFunc__append2_fld7(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]); } void DynamicFunc__append2_fld8(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]); } void DynamicFunc__append2_fld9(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]); } void DynamicFunc__SSEtoX86_switch_input1(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx, max; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = input_buf_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = input_buf_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = input_buf_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = input_buf[idx].w; max = total_len_X86[j] = (total_len[idx][0]); for (i = 1; i < SIMD_COEF_32; i++) if (max < (total_len_X86[j+i] = total_len[idx][j])) max = total_len_X86[j+i]; max = (max+3)>>2; for (k = 0; k < max; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpo[i]++ = *cpi++; } #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { input_buf_X86[(j>>1)+(i>>1)].x1.b[total_len_X86[j+i]] = 0; input_buf_X86[(j>>1)+(i>>1)].x2.b2[total_len_X86[j+i+1]] = 0; } #else for (i = 0; i < SIMD_COEF_32; i++) input_buf_X86[j+i].x1.b[total_len_X86[j+i]] = 0; #endif } #endif } void DynamicFunc__SSEtoX86_switch_input2(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx, max; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if 
(MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = input_buf2_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = input_buf2_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = input_buf2_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = input_buf2[idx].w; max = total_len2_X86[j] = (total_len2[idx][0]); for (i = 1; i < SIMD_COEF_32; i++) if (max < (total_len2_X86[j+i] = total_len2[idx][i])) max = total_len2_X86[j+i]; max = (max+3)>>2; for (k = 0; k < max; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpo[i]++ = *cpi++; } // get rid of the 0x80 #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { input_buf2_X86[(j>>1)+(i>>1)].x1.b[total_len_X86[j+i]] = 0; input_buf2_X86[(j>>1)+(i>>1)].x2.b2[total_len_X86[j+i+1]] = 0; } #else for (i = 0; i < SIMD_COEF_32; i++) input_buf2_X86[j+i].x1.b[total_len2_X86[j+i]] = 0; #endif } #endif } void DynamicFunc__SSEtoX86_switch_output1(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if MD5_X2 for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = crypt_key_X86[j+i].x1.w; #endif idx = j/SIMD_COEF_32; cpi = (void*)crypt_key[idx].c; for (k = 0; k < 4; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpo[i]++ = *cpi++; } } #endif } void DynamicFunc__SSEtoX86_switch_output2(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = crypt_key2_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = 
		crypt_key2[idx].w;
		for (k = 0; k < 4; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpo[i]++ = *cpi++;
		}
	}
#endif
}

/* Re-interleaves the flat input-1 buffers back into SIMD layout and
 * switches to SIMD mode (dynamic_use_sse=1). */
void DynamicFunc__X86toSSE_switch_input1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int j, idx, idx_mod;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	/* Clean first: the append helper below ORs data into cleared lanes. */
	__nonMP_DynamicFunc__clean_input();
	for (j = 0; j < m_count; ++j) {
		idx = j/SIMD_COEF_32;
		idx_mod = j&(SIMD_COEF_32-1);
		total_len[idx][idx_mod] += total_len_X86[j];
#if (MD5_X2)
		if (j & 1)
			__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>1].x2.B2,total_len_X86[j],0,1);
		else
#endif
		__SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>MD5_X2].x1.B,total_len_X86[j],0,1);
	}
#endif
}

/* Same as above for the input-2 buffers. */
void DynamicFunc__X86toSSE_switch_input2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int j, idx, idx_mod;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	__nonMP_DynamicFunc__clean_input2();
	for (j = 0; j < m_count; ++j) {
		idx = j/SIMD_COEF_32;
		idx_mod = j&(SIMD_COEF_32-1);
		total_len2[idx][idx_mod] += total_len2_X86[j];
#if (MD5_X2)
		if (j & 1)
			__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>1].x2.B2,total_len2_X86[j],0,1);
		else
#endif
		__SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>MD5_X2].x1.B,total_len2_X86[j],0,1);
	}
#endif
}

/* Re-interleaves the flat output-1 digests into SIMD layout.
 * Note the copy direction: *cpi++ = *cpo[i]++ (flat -> SIMD). */
void DynamicFunc__X86toSSE_switch_output1(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	for (j = 0; j < m_count; j += SIMD_COEF_32) {
		uint32_t *cpi;
		uint32_t *cpo[SIMD_COEF_32];
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = crypt_key_X86[j+i].x1.w;
#endif
		idx = j / SIMD_COEF_32;
		cpi = (void*)crypt_key[idx].c;
		for (k = 0; k < 4; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpi++ = *cpo[i]++;
		}
	}
#endif
}

/* Same as above for the output-2 digests. */
void DynamicFunc__X86toSSE_switch_output2(DYNA_OMP_PARAMS)
{
#ifdef SIMD_COEF_32
	unsigned int i, j, k, idx;
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
	for (j = 0; j < m_count; j += SIMD_COEF_32) {
		uint32_t *cpi;
		uint32_t *cpo[SIMD_COEF_32];
#if (MD5_X2)
		for (i = 0; i < SIMD_COEF_32; i += 2) {
			cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w;
			cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2;
		}
#else
		for (i = 0; i < SIMD_COEF_32; i++)
			cpo[i] = crypt_key2_X86[j+i].x1.w;
#endif
		idx = j / SIMD_COEF_32;
		cpi = crypt_key2[idx].w;
		for (k = 0; k < 4; ++k) {
			for (i = 0; i < SIMD_COEF_32; i++)
				*cpi++ = *cpo[i]++;
		}
	}
#endif
}

// This function, simply 'switches' back to SSE It does NOT copy any data from X86 to SSE
void DynamicFunc__ToSSE(DYNA_OMP_PARAMS)
{
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 1;
}

// This function, simply 'switches' to X86 It does NOT copy any data from SSE to X86
void DynamicFunc__ToX86(DYNA_OMP_PARAMS)
{
	if (dynamic_use_sse == 0)
		return;
	dynamic_use_sse = 2;
}

/* Select lower/upper-case hex tables used by all the base-16 primitives. */
void DynamicFunc__base16_convert_locase(DYNA_OMP_PARAMS)
{
	dynamic_itoa16 = itoa16;
	itoa16_w2=itoa16_w2_l;
}

void DynamicFunc__base16_convert_upcase(DYNA_OMP_PARAMS)
{
	dynamic_itoa16 = itoa16u;
	itoa16_w2=itoa16_w2_u;
}

/**************************************************************
 * DEPRICATED functions. These are the older pseudo functions
 * which we now have flags for. We keep them, so that we can
 * add the proper flags, even if the user is running an older
 * script.
*************************************************************/ void DynamicFunc__InitialLoadKeysToInput(DYNA_OMP_PARAMS) {} void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2(DYNA_OMP_PARAMS) {} void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1(DYNA_OMP_PARAMS) {} void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32(DYNA_OMP_PARAMS) {} /************************************************************** ************************************************************** ************************************************************** ************************************************************** * DYNAMIC primitive helper function * This is the END of the primitives. ************************************************************** ************************************************************** ************************************************************** *************************************************************/ static DYNAMIC_primitive_funcp *ConvertFuncs(DYNAMIC_primitive_funcp p, unsigned int *count) { static DYNAMIC_primitive_funcp fncs[20]; *count = 0; if (p==DynamicFunc__InitialLoadKeysToInput || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32) return fncs; // ignore these #ifndef SIMD_COEF_32 if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 || p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 || p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 || p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 || p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86) return fncs; // we ignore these functions 100% in x86 mode. 
#endif // if (p==DynamicFunc__append_input2_from_CONST1) { // fncs[0] = DynamicFunc__set_input2; // fncs[1] = DynamicFunc__set_CONST1; // fncs[2] = DynamicFunc__append_CONST; // *count = 3; // } /* LOOK INTO THIS!!!!! This may not be valid, now that SHA1 is handled 100% outside of the SSE2 code. But I am not sure just WTF this is supposed to do anyway, since not LE should be using CTX only??? */ #if !ARCH_LITTLE_ENDIAN if (/*p==DynamicFunc__SHA1_crypt_input1_append_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_append_input2 || /*p==DynamicFunc__SHA1_crypt_input2_append_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_append_input1 || /*p==DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input1 || /*p==DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input2 || /*p==DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input2 || /*p==DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input1 || p==DynamicFunc__SHA1_crypt_input1_to_output1_FINAL || p==DynamicFunc__SHA1_crypt_input2_to_output1_FINAL) curdat.force_md5_ctx = 0; #endif *count = 1; fncs[0] = p; return fncs; } #ifdef _OPENMP static int isBadOMPFunc(DYNAMIC_primitive_funcp p) { // If ANY of these functions are seen, we can NOT use OMP for this single format. 
#if SIMD_COEF_32 if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 || p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 || p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 || p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 || p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86) return 1; #endif if (p==DynamicFunc__base16_convert_locase || p==DynamicFunc__base16_convert_upcase) return 1; return 0; } #endif #define RETURN_TRUE_IF_BIG_FUNC(H) if (p==DynamicFunc__##H##_crypt_input1_append_input2 || \ p==DynamicFunc__##H##_crypt_input2_append_input1 || \ p==DynamicFunc__##H##_crypt_input1_overwrite_input1 || \ p==DynamicFunc__##H##_crypt_input2_overwrite_input2 || \ p==DynamicFunc__##H##_crypt_input1_overwrite_input2 || \ p==DynamicFunc__##H##_crypt_input2_overwrite_input1 || \ p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL || \ p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL) \ return 1 static int isMD4Func(DYNAMIC_primitive_funcp p) { // handle flats RETURN_TRUE_IF_BIG_FUNC(MD4); // handle older mmx_coef variants if (p==DynamicFunc__crypt_md4 || p==DynamicFunc__crypt_md4_in1_to_out2 || p==DynamicFunc__crypt2_md4 || p==DynamicFunc__crypt_md4_in2_to_out1) return 1; return 0; } #ifdef _OPENMP // Only used in OMP code, to compute LCM granularity. So we #ifdef it out to avoid compiler warnings. 
#ifdef SIMD_COEF_32 // otherwise unused
/* Returns non-zero if p is any MD5 primitive (flat or older mmx_coef variants). */
static int isMD5Func(DYNAMIC_primitive_funcp p)
{
	// handle flats
	RETURN_TRUE_IF_BIG_FUNC(MD5);
	// handle older mmx_coef variants
	if (p==DynamicFunc__crypt_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 ||
	    p==DynamicFunc__crypt_md5_in1_to_out2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 ||
	    p==DynamicFunc__crypt_md5_to_input_raw || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen ||
	    p==DynamicFunc__crypt_md5_in2_to_out1 || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE ||
	    p==DynamicFunc__crypt2_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
		return 1;
	return 0;
}
#endif
#endif

/* Hash-family classifiers, used by dyna_setupOMP() (work granularity) and by
 * dynamic_SETUP() (algorithm-name reporting). */
static int isSHA1Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SHA1); return 0; }
static int isSHA2_256Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SHA224); RETURN_TRUE_IF_BIG_FUNC(SHA256); return 0; }
static int isSHA2_512Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SHA384); RETURN_TRUE_IF_BIG_FUNC(SHA512); return 0; }
static int isGOSTFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(GOST); return 0; }
static int isTigerFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(Tiger); return 0; }
static int isWHIRLFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(WHIRLPOOL); return 0; }
static int isRIPEMDFunc(DYNAMIC_primitive_funcp p)
{
	RETURN_TRUE_IF_BIG_FUNC(RIPEMD128); RETURN_TRUE_IF_BIG_FUNC(RIPEMD160);
	RETURN_TRUE_IF_BIG_FUNC(RIPEMD256); RETURN_TRUE_IF_BIG_FUNC(RIPEMD320);
	return 0;
}
static int isHAVALFunc(DYNAMIC_primitive_funcp p)
{
	RETURN_TRUE_IF_BIG_FUNC(HAVAL128_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL160_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL192_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL224_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_5);
	RETURN_TRUE_IF_BIG_FUNC(HAVAL256_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_5);
	return 0;
}
static int isMD2Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(MD2); return 0; }
static int isPANAMAFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(PANAMA); return 0; }
static int isSKEINFunc(DYNAMIC_primitive_funcp p)
{
	RETURN_TRUE_IF_BIG_FUNC(SKEIN224); RETURN_TRUE_IF_BIG_FUNC(SKEIN256);
	RETURN_TRUE_IF_BIG_FUNC(SKEIN384); RETURN_TRUE_IF_BIG_FUNC(SKEIN512);
	return 0;
}
static int isKECCAKFunc(DYNAMIC_primitive_funcp p)
{
	RETURN_TRUE_IF_BIG_FUNC(SHA3_224); RETURN_TRUE_IF_BIG_FUNC(SHA3_256);
	RETURN_TRUE_IF_BIG_FUNC(SHA3_384); RETURN_TRUE_IF_BIG_FUNC(SHA3_512);
	RETURN_TRUE_IF_BIG_FUNC(KECCAK_256); RETURN_TRUE_IF_BIG_FUNC(KECCAK_512);
	return 0;
}
// LARGE_HASH_EDIT_POINT (Add a new IsXXXFunc() type function)

/* Returns non-zero if p is a *_FINAL primitive of any large hash; such a
 * primitive is only legal as the LAST function of a script (checked by
 * dynamic_SETUP). */
static int isLargeHashFinalFunc(DYNAMIC_primitive_funcp p)
{
#undef IF
#define IF(H) p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL||p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL

	if (IF(SHA1)||IF(SHA224)||IF(SHA256)||IF(SHA384)||IF(SHA512)||IF(GOST)||IF(WHIRLPOOL)||IF(Tiger)||IF(RIPEMD128)||
	    IF(RIPEMD160)||IF(RIPEMD256)||IF(RIPEMD320)||
	    IF(HAVAL128_3)||IF(HAVAL128_4)||IF(HAVAL128_5)||IF(HAVAL160_3)||IF(HAVAL160_4)||IF(HAVAL160_5)||
	    IF(HAVAL192_3)||IF(HAVAL192_4)||IF(HAVAL192_5)||IF(HAVAL224_3)||IF(HAVAL224_4)||IF(HAVAL224_5)||
	    IF(HAVAL256_3)||IF(HAVAL256_4)||IF(HAVAL256_5)||IF(MD2)||IF(PANAMA)||IF(SKEIN224)||IF(SKEIN256)||
	    IF(SKEIN384)||IF(SKEIN512)||IF(SHA3_224)||IF(SHA3_256)||IF(SHA3_384)||IF(SHA3_512)||
	    IF(KECCAK_256)||IF(KECCAK_512))
		// LARGE_HASH_EDIT_POINT
		return 1;
	return 0;
}

#ifdef _OPENMP
#ifdef SIMD_COEF_32
// Simple euclid algorithm for GCD
static int GCD (int a, int b)
{
	while (b) {
		int t = b;
		b = a % b;
		a = t;
	}
	return a;
}

// simple algorithm for LCM is (a*b)/GCD(a,b)
static int LCM(int
a, int b) { a/=GCD(a,b); return a*b; } #endif static void dyna_setupOMP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt) { unsigned int i; #ifndef SIMD_COEF_32 curdat.omp_granularity=OMP_INC; #else if ((curdat.pSetup->flags& MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) curdat.omp_granularity=OMP_INC; else { curdat.omp_granularity = 1; for (i=0; Setup->pFuncs[i]; ++i) { if (isMD5Func(Setup->pFuncs[i])) curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD5*SIMD_COEF_32); else if (isMD4Func(Setup->pFuncs[i])) curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD4*SIMD_COEF_32); else if (isSHA1Func(Setup->pFuncs[i])) curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA1*SIMD_COEF_32); else if (isSHA2_256Func(Setup->pFuncs[i])) #if SIMD_COEF_32 #if SIMD_PARA_SHA256 curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA256*SIMD_COEF_32); #else curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_32); #endif #else curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC); #endif else if (isSHA2_512Func(Setup->pFuncs[i])) #if SIMD_COEF_64 #if SIMD_PARA_SHA512 curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA512*SIMD_COEF_64); #else curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_64); #endif #else curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC); #endif } } #endif for (i=0; Setup->pFuncs[i]; ++i) { if (isBadOMPFunc(Setup->pFuncs[i])) pFmt->params.flags &= (~(FMT_OMP|FMT_OMP_BAD)); } if ((pFmt->params.flags&FMT_OMP)==FMT_OMP && (curdat.pSetup->startFlags&MGF_POOR_OMP)==MGF_POOR_OMP) pFmt->params.flags |= FMT_OMP_BAD; } #endif int dynamic_SETUP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt) { unsigned int i, j, cnt, cnt2, x; DYNAMIC_primitive_funcp *pFuncs; if (Setup->flags & MGF_ColonNOTValid) { extern struct options_main options; if (options.loader.field_sep_char == ':') { return 0; } } // Deal with depricated 1st functions. 
// Convert them to proper 'flags':
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeysToInput)
		Setup->startFlags |= MGF_KEYS_INPUT;
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2)
		Setup->startFlags |= MGF_KEYS_CRYPT_IN2;
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1)
		Setup->startFlags |= MGF_KEYS_BASE16_IN1;
	if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32)
		Setup->startFlags |= MGF_KEYS_BASE16_IN1_Offset32;

	/* Record the binary-input width (field names are hex-digit counts,
	 * MGF_INPUT_* flags are raw byte counts). */
	curdat.dynamic_40_byte_input = ((Setup->startFlags&MGF_INPUT_20_BYTE)==MGF_INPUT_20_BYTE) ? 1 : 0;
	curdat.dynamic_48_byte_input = ((Setup->startFlags&MGF_INPUT_24_BYTE)==MGF_INPUT_24_BYTE) ? 1 : 0;
	curdat.dynamic_64_byte_input = ((Setup->startFlags&MGF_INPUT_32_BYTE)==MGF_INPUT_32_BYTE) ? 1 : 0;
	curdat.dynamic_56_byte_input = ((Setup->startFlags&MGF_INPUT_28_BYTE)==MGF_INPUT_28_BYTE) ? 1 : 0;
	curdat.dynamic_80_byte_input = ((Setup->startFlags&MGF_INPUT_40_BYTE)==MGF_INPUT_40_BYTE) ? 1 : 0;
	curdat.dynamic_96_byte_input = ((Setup->startFlags&MGF_INPUT_48_BYTE)==MGF_INPUT_48_BYTE) ? 1 : 0;
	curdat.dynamic_128_byte_input= ((Setup->startFlags&MGF_INPUT_64_BYTE)==MGF_INPUT_64_BYTE) ? 1 : 0;

	curdat.FldMask = 0;
	curdat.b2Salts = ((Setup->flags&MGF_SALTED2)==MGF_SALTED2) ? 1 : 0;
	curdat.dynamic_base16_upcase = ((Setup->flags&MGF_BASE_16_OUTPUT_UPCASE)==MGF_BASE_16_OUTPUT_UPCASE) ? 1 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD0)==MGF_FLD0) ? MGF_FLD0 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD1)==MGF_FLD1) ? MGF_FLD1 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD2)==MGF_FLD2) ? MGF_FLD2 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD3)==MGF_FLD3) ? MGF_FLD3 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD4)==MGF_FLD4) ? MGF_FLD4 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD5)==MGF_FLD5) ? MGF_FLD5 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD6)==MGF_FLD6) ? MGF_FLD6 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD7)==MGF_FLD7) ? MGF_FLD7 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD8)==MGF_FLD8) ? MGF_FLD8 : 0;
	curdat.FldMask |= ((Setup->flags&MGF_FLD9)==MGF_FLD9) ? MGF_FLD9 : 0;

	curdat.dynamic_base64_inout = 0;
	curdat.dynamic_salt_as_hex = 0;
	curdat.dynamic_salt_as_hex_format_type = 0;
	curdat.force_md5_ctx = 0;
	curdat.nUserName = 0;
	curdat.nPassCase = 1;
	curdat.md5_startup_in_x86 = curdat.dynamic_use_sse = 0; // if 0, then never use SSE2
	curdat.init = 0;
	curdat.pSetup = Setup;

	/* Default method table; several entries may be overridden below. */
	pFmt->methods.binary = get_binary;
	pFmt->methods.cmp_all=cmp_all;
	pFmt->methods.cmp_one=cmp_one;
	pFmt->methods.source=fmt_default_source;
	pFmt->methods.salt = get_salt;
	pFmt->methods.done = done;
	pFmt->methods.set_salt = set_salt;
	pFmt->methods.salt_hash = salt_hash;
	//pFmt->params.format_name = str_alloc_copy(Setup->szFORMAT_NAME);
	pFmt->params.format_name = "";
	pFmt->params.benchmark_length = 0; // NOTE 0 'assumes' salted. If unsalted, we set back to -1
	pFmt->params.salt_size = 0;
	curdat.using_flat_buffers_sse2_ok = 0; // used to distingish MGF_NOTSSE2Safe from MGF_FLAT_BUFFERS
	if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS)
		curdat.using_flat_buffers_sse2_ok = 1;
#ifdef SIMD_COEF_32
	curdat.dynamic_use_sse = 1; // if 1, then we are in SSE2 mode (but can switch out)
	if ((Setup->flags & MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) {
		curdat.dynamic_use_sse = 0; // Do not use SSE code at all.
	}
	else if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS) {
		curdat.dynamic_use_sse = 0; // uses flat buffers but will use SSE code (large formats use the flat buffers, and the SSE2 code 'mixes' them).
		curdat.using_flat_buffers_sse2_ok = 1;
	}
	else if ((Setup->flags & MGF_StartInX86Mode) == MGF_StartInX86Mode) {
		curdat.dynamic_use_sse = 2; // if 2, then we are in SSE2 mode, but currently using X86 (and can switch back to SSE2).
curdat.md5_startup_in_x86 = 1;
	}
	/* Key-count and algorithm-name selection follows the SSE decision above. */
	if (curdat.dynamic_use_sse || curdat.using_flat_buffers_sse2_ok) {
		pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT;
		pFmt->params.algorithm_name = ALGORITHM_NAME;
	} else {
		pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
		pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
	}
#else
	pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86;
	pFmt->params.algorithm_name = ALGORITHM_NAME_X86;
#endif
	pFmt->params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
	if (pFmt->params.min_keys_per_crypt > 64)
		pFmt->params.min_keys_per_crypt = 64;
	dynamic_use_sse = curdat.dynamic_use_sse;

	// Ok, set the new 'constants' data
	memset(curdat.Consts, 0, sizeof(curdat.Consts));
	memset(curdat.ConstsLen, 0, sizeof(curdat.ConstsLen));
	for (curdat.nConsts = 0; curdat.nConsts < 8; ++curdat.nConsts) {
		if (Setup->pConstants[curdat.nConsts].Const == NULL)
			break;
		//curdat.Consts[curdat.nConsts] = (unsigned char*)str_alloc_copy(Setup->pConstants[curdat.nConsts].Const);
		//curdat.ConstsLen[curdat.nConsts] = strlen(Setup->pConstants[curdat.nConsts].Const);

		// we really do not 'have' to null terminate, but do just to be on the 'safe' side.
		curdat.Consts[curdat.nConsts] = mem_alloc_tiny(Setup->pConstants[curdat.nConsts].len+1, MEM_ALIGN_NONE);
		memcpy(curdat.Consts[curdat.nConsts], Setup->pConstants[curdat.nConsts].Const, Setup->pConstants[curdat.nConsts].len);
		curdat.Consts[curdat.nConsts][Setup->pConstants[curdat.nConsts].len] = 0;
		curdat.ConstsLen[curdat.nConsts] = Setup->pConstants[curdat.nConsts].len;
	}

	/* Base-64 input variants override the binary() loader (and for 4x6 also
	 * the compare/hash methods). */
	if ( (Setup->flags & MGF_INPBASE64) == MGF_INPBASE64) {
		curdat.dynamic_base64_inout = 1;
		pFmt->methods.binary = binary_b64;
	}
	if ( (Setup->flags & MGF_INPBASE64m) == MGF_INPBASE64m) {
		curdat.dynamic_base64_inout = 3;
		pFmt->methods.binary = binary_b64m;
	}
	if ( (Setup->flags & MGF_INPBASE64b) == MGF_INPBASE64b) {
		curdat.dynamic_base64_inout = 5;
		pFmt->methods.binary = binary_b64b;
	}
	if ( (Setup->flags & MGF_INPBASE64_4x6) == MGF_INPBASE64_4x6) {
		curdat.dynamic_base64_inout = 2;
		pFmt->methods.binary = binary_b64_4x6;
		pFmt->methods.cmp_all = cmp_all_64_4x6;
		pFmt->methods.cmp_one = cmp_one_64_4x6;
#if !ARCH_LITTLE_ENDIAN
		pFmt->methods.binary_hash[0] = binary_hash_0_64x4;
		pFmt->methods.binary_hash[1] = binary_hash_1_64x4;
		pFmt->methods.binary_hash[2] = binary_hash_2_64x4;
		pFmt->methods.binary_hash[3] = binary_hash_3_64x4;
		pFmt->methods.binary_hash[4] = binary_hash_4_64x4;
		pFmt->methods.binary_hash[5] = binary_hash_5_64x4;
		pFmt->methods.get_hash[0] = get_hash_0_64x4;
		pFmt->methods.get_hash[1] = get_hash_1_64x4;
		pFmt->methods.get_hash[2] = get_hash_2_64x4;
		pFmt->methods.get_hash[3] = get_hash_3_64x4;
		pFmt->methods.get_hash[4] = get_hash_4_64x4;
		pFmt->methods.get_hash[5] = get_hash_5_64x4;
#endif
		// Not enough bits in a single WORD
		if (PASSWORD_HASH_SIZE_6 >= 0x1000000) {
			pFmt->methods.binary_hash[6] = NULL;
			pFmt->methods.get_hash[6] = NULL;
		}
		if (PASSWORD_HASH_SIZE_5 >= 0x1000000) {
			pFmt->methods.binary_hash[5] = NULL;
			pFmt->methods.get_hash[5] = NULL;
		}
		if (PASSWORD_HASH_SIZE_4 >= 0x1000000) {
			pFmt->methods.binary_hash[4] = NULL;
			pFmt->methods.get_hash[4] = NULL;
		}
		if (PASSWORD_HASH_SIZE_3 >= 0x1000000) {
pFmt->methods.binary_hash[3] = NULL;
			pFmt->methods.get_hash[3] = NULL;
		}
	}
//	printf("%.13s",Setup->szFORMAT_NAME);
	if ( (Setup->flags & (MGF_INPBASE64|MGF_INPBASE64_4x6|MGF_INPBASE64a|MGF_INPBASE64m|MGF_INPBASE64b)) == 0) {
		// hex-input formats can case-unify their ciphertext in split()
		pFmt->params.flags |= FMT_SPLIT_UNIFIES_CASE;
//		printf(" Setting FMT_SPLIT_UNIFIES_CASE");
		if (pFmt->methods.split == split) {
			pFmt->methods.split = split_UC;
//			printf(" split set to split_UC()\n");
		}
	}
//	else printf(" split set to split()\n");
	if (Setup->flags & MGF_UTF8)
		pFmt->params.flags |= FMT_UTF8;
	if (Setup->flags & MGF_INPBASE64a) {
		curdat.dynamic_base64_inout = 1;
		pFmt->methods.binary = binary_b64a;
	}

	/* User-name handling: 1 = as-is, 2 = upcased, 3 = locased. */
	if ( (Setup->flags & MGF_USERNAME) == MGF_USERNAME)
		curdat.nUserName = 1;
	if ( (Setup->flags & MGF_USERNAME_UPCASE) == MGF_USERNAME_UPCASE)
		curdat.nUserName = 2;
	if ( (Setup->flags & MGF_USERNAME_LOCASE) == MGF_USERNAME_LOCASE)
		curdat.nUserName = 3;

	// Ok, what 'flag' in the format struct, do we clear???
	if ( (Setup->flags & MGF_PASSWORD_UPCASE) == MGF_PASSWORD_UPCASE) {
		curdat.nPassCase = 2;
		pFmt->params.flags &= (~FMT_CASE);
	}
	if ( (Setup->flags & MGF_PASSWORD_LOCASE) == MGF_PASSWORD_LOCASE) {
		curdat.nPassCase = 3;
		pFmt->params.flags &= (~FMT_CASE);
	}

	if ( (Setup->flags & MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX) {
		curdat.dynamic_salt_as_hex = 1;
		// the hash type used for the salt lives in the top 8 bits of flags
		curdat.dynamic_salt_as_hex_format_type = Setup->flags >> 56;
	}
	if ( (Setup->flags & MGF_SALT_AS_HEX_TO_SALT2) == MGF_SALT_AS_HEX_TO_SALT2) {
		curdat.dynamic_salt_as_hex = 2;
		if (curdat.b2Salts)
			return !fprintf(stderr, "Error invalid format %s: MGF_SALT_AS_HEX_TO_SALT2 and MGF_SALTED2 are not valid to use in same format\n", Setup->szFORMAT_NAME);
		curdat.b2Salts = 2;
	}
	if ( (Setup->flags & MGF_SALT_UNICODE_B4_CRYPT) == MGF_SALT_UNICODE_B4_CRYPT && curdat.dynamic_salt_as_hex)
		curdat.dynamic_salt_as_hex |= 0x100;

	if ( (Setup->flags & MGF_SALTED) == 0) {
		curdat.dynamic_FIXED_SALT_SIZE = 0;
		pFmt->params.benchmark_length = -1;
		pFmt->params.salt_size = 0;
	}
	else {
		pFmt->params.salt_size = sizeof(void *);
		if (Setup->SaltLen > 0)
			curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
		else {
			// says we have a salt, but NOT a fixed sized one that we 'know' about.
			// if the SaltLen is -1, then there is NO constraints. If the SaltLen
			// is -12 (or any other neg number other than -1), then there is no
			// fixed salt length, but the 'max' salt size is -SaltLen.  So, -12
			// means any salt from 1 to 12 is 'valid'.
			if (Setup->SaltLen > -2)
				curdat.dynamic_FIXED_SALT_SIZE = -1;
			else {
				curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen;
#if !defined (SIMD_COEF_32)
				// for non-sse, we limit ourselves to 110 bytes, not 55. So, we can add 55 to this value
				curdat.dynamic_FIXED_SALT_SIZE -= 55;
#endif
			}
		}
	}

	/* Plaintext length: either explicit, or derived from buffer capacity
	 * (110 for flat buffers / non-SSE, 55 for SSE) minus the salt length. */
	if (Setup->MaxInputLen)
		pFmt->params.plaintext_length = Setup->MaxInputLen;
	else {
		if ( ((Setup->flags&MGF_FLAT_BUFFERS)==MGF_FLAT_BUFFERS) || ((Setup->flags&MGF_NOTSSE2Safe)==MGF_NOTSSE2Safe)) {
			pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
			if (pFmt->params.plaintext_length < 32)
				pFmt->params.plaintext_length = 32;
		} else {
			pFmt->params.plaintext_length = 55 - abs(Setup->SaltLen);
			if (pFmt->params.plaintext_length < 1) {
				pFmt->params.plaintext_length = 1;
				fprintf(stderr, "\nError, for format %s, MMX build, is not valid due to TOO long of a SaltLength\n", Setup->szFORMAT_NAME);
			}
		}
	}
#ifndef SIMD_COEF_32
	if (Setup->MaxInputLenX86) {
		pFmt->params.plaintext_length = Setup->MaxInputLenX86;
	} else {
		if (Setup->SaltLenX86)
			pFmt->params.plaintext_length = 110 - abs(Setup->SaltLenX86);
		else
			pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen);
		if (pFmt->params.plaintext_length < 32)
			pFmt->params.plaintext_length = 32;
	}
#endif
	curdat.store_keys_in_input = !!(Setup->startFlags&MGF_KEYS_INPUT );
	curdat.input2_set_len32 = !!(Setup->startFlags&MGF_SET_INP2LEN32);

	/* MGF_SOURCE: pick the source() variant matching the binary width. */
	if (Setup->startFlags&MGF_SOURCE) {
		if (Setup->startFlags&MGF_INPUT_20_BYTE)
			pFmt->methods.source = source_20_hex;
		else if (Setup->startFlags&MGF_INPUT_28_BYTE)
			pFmt->methods.source = source_28_hex;
		else if
(Setup->startFlags&MGF_INPUT_32_BYTE)
			pFmt->methods.source = source_32_hex;
		else if (Setup->startFlags&MGF_INPUT_40_BYTE)
			pFmt->methods.source = source_40_hex;
		else if (Setup->startFlags&MGF_INPUT_48_BYTE)
			pFmt->methods.source = source_48_hex;
		else if (Setup->startFlags&MGF_INPUT_64_BYTE)
			pFmt->methods.source = source_64_hex;
		else
			pFmt->methods.source = source;
	}

	if (!curdat.store_keys_in_input && Setup->startFlags&MGF_KEYS_INPUT_BE_SAFE)
		curdat.store_keys_in_input = 3;
	curdat.store_keys_in_input_unicode_convert = !!(Setup->startFlags&MGF_KEYS_UNICODE_B4_CRYPT);
	if (curdat.store_keys_in_input_unicode_convert && curdat.store_keys_in_input)
		return !fprintf(stderr, "Error invalid format %s: Using MGF_KEYS_INPUT and MGF_KEYS_UNICODE_B4_CRYPT in same format is NOT valid\n", Setup->szFORMAT_NAME);
	curdat.store_keys_normal_but_precompute_hash_to_output2 = !!(Setup->startFlags&MGF_KEYS_CRYPT_IN2);
	curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = !!(Setup->startFlags&MGF_KEYS_BASE16_IN1);
	if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1)
		curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;

/* If the start-flags request "keys hashed with F, base16, at an offset in
 * input 1", record L = hex length of an F digest as the offset to use.
 * First match wins (the guard keeps later entries from overwriting). */
#define IF_CDOFF32(F,L) if (!curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) \
	curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = \
	(!!((Setup->startFlags&MGF_KEYS_BASE16_IN1_Offset_TYPE)==MGF_KEYS_BASE16_IN1_Offset_ ## F))*L

	curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = 0;
	IF_CDOFF32(MD5,32); IF_CDOFF32(MD4,32); IF_CDOFF32(SHA1,40);
	IF_CDOFF32(SHA224,56); IF_CDOFF32(SHA256,64); IF_CDOFF32(SHA384,96);
	IF_CDOFF32(SHA512,128); IF_CDOFF32(GOST,64); IF_CDOFF32(WHIRLPOOL,128);
	IF_CDOFF32(Tiger,48); IF_CDOFF32(RIPEMD128,32); IF_CDOFF32(RIPEMD160,40);
	IF_CDOFF32(RIPEMD256,64); IF_CDOFF32(RIPEMD320,80); IF_CDOFF32(MD2,32);
	IF_CDOFF32(PANAMA,64);
	IF_CDOFF32(HAVAL128_3,32); IF_CDOFF32(HAVAL160_3,40); IF_CDOFF32(HAVAL192_3,48);
	IF_CDOFF32(HAVAL224_3,56); IF_CDOFF32(HAVAL256_3,64);
	IF_CDOFF32(HAVAL128_4,32); IF_CDOFF32(HAVAL160_4,40); IF_CDOFF32(HAVAL192_4,48);
	IF_CDOFF32(HAVAL224_4,56); IF_CDOFF32(HAVAL256_4,64);
	IF_CDOFF32(HAVAL128_5,32); IF_CDOFF32(HAVAL160_5,40); IF_CDOFF32(HAVAL192_5,48);
	IF_CDOFF32(HAVAL224_5,56); IF_CDOFF32(HAVAL256_5,64);
	IF_CDOFF32(SKEIN224,56); IF_CDOFF32(SKEIN256,64); IF_CDOFF32(SKEIN384,96);
	IF_CDOFF32(SKEIN512,128);
	IF_CDOFF32(SHA3_224,56); IF_CDOFF32(SHA3_256,64); IF_CDOFF32(SHA3_384,96);
	IF_CDOFF32(SHA3_512,128); IF_CDOFF32(KECCAK_256,64); IF_CDOFF32(KECCAK_512,128);
	// LARGE_HASH_EDIT_POINT

	if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) {
		curdat.store_keys_normal_but_precompute_hash_to_output2 = 1;
	}
	// which hash family produced the base16 data: top 8 bits of startFlags
	curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type = Setup->startFlags>>56;

	if ((Setup->startFlags) == 0) {
		// Ok, if we do not have some 'special' loader function, we MUST first clean some
		// input.  If that is not done, there is NO WAY this is a valid format.  This is
		// NOT an intelligent check, but more like the dummy lights on newer automobiles.
		// You know it will not work, but do not know 'why', nor should you care.
		if (Setup->pFuncs[0] != DynamicFunc__clean_input &&
		    Setup->pFuncs[0] != DynamicFunc__clean_input2 &&
		    Setup->pFuncs[0] != DynamicFunc__clean_input_kwik &&
		    Setup->pFuncs[0] != DynamicFunc__clean_input2_kwik &&
		    Setup->pFuncs[0] != DynamicFunc__clean_input_full)
			return !fprintf(stderr, "Error invalid format %s: The first command MUST be a clean of input 1 or input 2 OR a special key 2 input loader function\n", Setup->szFORMAT_NAME);
	}
	if ( (Setup->flags&MGF_SALTED2)==MGF_SALTED2 && (Setup->flags&MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX) {
		// if the user wants salt_as_hex, then here can NOT be 2 salts.
return !fprintf(stderr, "Error invalid format %s: If using MGF_SALT_AS_HEX flag, then you can NOT have a 2nd salt.\n", Setup->szFORMAT_NAME); } if (Setup->pFuncs && Setup->pFuncs[0]) { unsigned int z; for (z = 0; Setup->pFuncs[z]; ++z) ; z += 50; curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD); j = 0; #if !ARCH_LITTLE_ENDIAN // for bigendian, we do NOT store into keys, since we byte swap them. if (curdat.store_keys_in_input==1) { // this is only a minor speed hit, so simply fix by doing this. There is an // extra memcpy, that is it. curdat.store_keys_in_input = 0; curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input; curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys; } // NOTE NOTE NOTE, FIXME. These are 'hacks' which slow stuff way down. We should look at // building preloads that CAN do this. Store key input to input 1, but then do not use // input 1. Put a copy to input 2, then append, etc. In that way, we cut the number of // MD5's down by at least 1. // // But for now, just get it working. Get it working faster later. // NOTE, these are commented out now. I am not sure why they were there // I think the thought was for SIMD, BUT SIMD is not used on Sparc // I am leaving this code for now, BUT I think it should NOT be here. // I was getting failures on the 16 byte sph formats, for any // hash(hash($p).$s) such as md2(md2($p).$s) However, the modifications // where curdat.store_keys_in_input==1 is absolutely needed, or we have // get_key() failures all over the place. // note, with Setup->pFuncs[0]==DynamicFunc__set_input_len_32, we only will handle type 6 and 7 // for now we have this 'turned' off. It is fixed for type 6, 7 and 14. It is left on for the // john.ini stuff. Thus, if someone builds the intel version type 6, it will work (but slower). 
// if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==1 && Setup->pFuncs[0]==DynamicFunc__set_input_len_32) { // curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = 0; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__crypt_md5; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input; // Setup->pFuncs[0] = DynamicFunc__append_from_last_output_as_base16; // } #endif for (i=0; Setup->pFuncs[i]; ++i) { if (j > z-10) { unsigned int k; z += 100; curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD); for (k = 0; k <= j; ++k) curdat.dynamic_FUNCTIONS[k] = curdat.dynamic_FUNCTIONS[k]; } if (curdat.store_keys_in_input) { if (Setup->pFuncs[i] == DynamicFunc__append_keys) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_keys2) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys2 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__clean_input) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but clean_input called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_salt) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_salt called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output2_to_input1_as_base16) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output2_to_input1_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix) return !fprintf(stderr, "Error invalid format 
%s: MGF_KEYS_INPUT used, but overwrite_from_last_output2_to_input1_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output_as_base16) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output_as_base16s called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_2nd_salt called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__set_input_len_32) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__set_input_len_64) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__overwrite_salt_to_input1_no_size_fix) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_input_from_input2) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); } // Ok if copy constants are set, make SURE we have that many constants. 
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST1 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST1) && curdat.nConsts == 0)
				return !fprintf(stderr, "Error invalid format %s: Append Constant function called, but NO constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST2 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST2) && curdat.nConsts < 2)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #2 function called, but NO constants, or less than 2 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST3 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST3) && curdat.nConsts < 3)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #3 function called, but NO constants, or less than 3 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST4 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST4) && curdat.nConsts < 4)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #4 function called, but NO constants, or less than 4 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST5 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST5) && curdat.nConsts < 5)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #5 function called, but NO constants, or less than 5 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST6 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST6) && curdat.nConsts < 6)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #6 function called, but NO constants, or less than 6 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST7 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST7) && curdat.nConsts < 7)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #7 function called, but NO constants, or less than 7 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST8 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST8) && curdat.nConsts < 8)
				return !fprintf(stderr, "Error invalid format %s: Append Constant #8 function called, but NO constants, or less than 8 constants in the format\n", Setup->szFORMAT_NAME);
			if ( (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt || Setup->pFuncs[i] == DynamicFunc__append_2nd_salt2) && curdat.b2Salts == 0)
				return !fprintf(stderr, "Error invalid format %s: A call to one of the 'salt-2' functions, but this format does not have MFG_SALT2 flag set\n", Setup->szFORMAT_NAME);

			// Ok, if we have made it here, the function is 'currently' still valid.  Load this pointer into our array of pointers.
			pFuncs = ConvertFuncs(Setup->pFuncs[i], &cnt2);

/* Upgrade the reported algorithm name (SSE or x86 flavor) when a primitive of
 * hash family H is seen; N selects the ALGORITHM_NAME_* suffix. */
#define IS_FUNC_NAME(H,N) if (is##H##Func(pFuncs[x])){ if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME)) pFmt->params.algorithm_name = ALGORITHM_NAME_##N; \
	else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86)) pFmt->params.algorithm_name = ALGORITHM_NAME_X86_##N; }

			for (x = 0; x < cnt2; ++x) {
				curdat.dynamic_FUNCTIONS[j++] = pFuncs[x];
				if (pFuncs[x] == DynamicFunc__setmode_unicode || pFuncs[x] == DynamicFunc__setmode_unicodeBE)
					pFmt->params.flags |= FMT_UNICODE;
				IS_FUNC_NAME(SHA1,S)
				/* SHA2 names depend on whether the flat-buffer SIMD code applies. */
				if (isSHA2_256Func(pFuncs[x])) {
#ifdef SIMD_COEF_32
					if (curdat.using_flat_buffers_sse2_ok)
						pFmt->params.algorithm_name = ALGORITHM_NAME_S2_256;
					else
#endif
						pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_256;
				}
				if (isSHA2_512Func(pFuncs[x])) {
#ifdef SIMD_COEF_64
					if (curdat.using_flat_buffers_sse2_ok)
						pFmt->params.algorithm_name = ALGORITHM_NAME_S2_512;
					else
#endif
						pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_512;
				}
				IS_FUNC_NAME(MD4,4)
IS_FUNC_NAME(WHIRL,WP2) IS_FUNC_NAME(GOST,GST2) IS_FUNC_NAME(Tiger,TGR) IS_FUNC_NAME(RIPEMD,RIPEMD) IS_FUNC_NAME(HAVAL,HAVAL) IS_FUNC_NAME(MD2,MD2) IS_FUNC_NAME(PANAMA,PANAMA) IS_FUNC_NAME(SKEIN,SKEIN) // Note, until we add SIMD keccak, one algoithm is all we 'need' IS_FUNC_NAME(KECCAK,KECCAK) // IS_FUNC_NAME(KECCAK,SHA3_256) // IS_FUNC_NAME(KECCAK,SHA3_384) // IS_FUNC_NAME(KECCAK,SHA3_512) // IS_FUNC_NAME(KECCAK,KECCAK_256) // IS_FUNC_NAME(KECCAK,KECCAK_512) // LARGE_HASH_EDIT_POINT (MUST match the just added a new IsXXXFunc() type function) } if (isLargeHashFinalFunc(curdat.dynamic_FUNCTIONS[j-1])) { if (Setup->pFuncs[i+1]) return !fprintf(stderr, "Error invalid format %s: DynamicFunc__LARGE_HASH_crypt_inputX_to_output1_FINAL, can ONLY be used as the last function in a script\n", Setup->szFORMAT_NAME); } } curdat.dynamic_FUNCTIONS[j] = NULL; } if (!Setup->pPreloads || Setup->pPreloads[0].ciphertext == NULL) { return !fprintf(stderr, "Error invalid format %s: Error, no validation hash(s) for this format\n", Setup->szFORMAT_NAME); } cnt = 0; #ifdef _OPENMP dyna_setupOMP(Setup, pFmt); #endif { struct fmt_tests *pfx = mem_alloc_tiny(ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests), MEM_ALIGN_WORD); memset(pfx, 0, ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests)); for (i = 0; cnt < ARRAY_COUNT(dynamic_tests) -1; ++i) { if (Setup->pPreloads[i].ciphertext == NULL) { i = 0; } if (Setup->pPreloads[i].ciphertext[0] == 'A' && Setup->pPreloads[i].ciphertext[1] == '=') { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) continue; pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]); } else if (Setup->pPreloads[i].ciphertext[0] == 'U' && Setup->pPreloads[i].ciphertext[1] == '=') { if (options.target_enc != UTF_8) continue; pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]); } else pfx[cnt].ciphertext = str_alloc_copy(Setup->pPreloads[i].ciphertext); pfx[cnt].plaintext = 
str_alloc_copy(Setup->pPreloads[i].plaintext);
		pfx[cnt].fields[0] = Setup->pPreloads[i].fields[0] ? str_alloc_copy(Setup->pPreloads[i].fields[0]) : "";
		pfx[cnt].fields[1] = pfx[cnt].ciphertext;
		for (j = 2; j < 10; ++j)
			pfx[cnt].fields[j] = Setup->pPreloads[i].fields[j] ? str_alloc_copy(Setup->pPreloads[i].fields[j]) : "";
		++cnt;
	}
	/* NULL-terminate the self-test array */
	pfx[cnt].ciphertext = NULL;
	pfx[cnt].plaintext = NULL;

	pFmt->params.tests = pfx;
	}

	/* pick the hex conversion table matching this format's upper/lower-case preference */
	if (curdat.dynamic_base16_upcase)
		dynamic_itoa16 = itoa16u;
	else
		dynamic_itoa16 = itoa16;

	{
		/* build the final algorithm name: text after the first space of the
		 * format name, followed by the algorithm name chosen above */
		char s[512], *cp;
		cp = Setup->szFORMAT_NAME;
		cp = strchr(Setup->szFORMAT_NAME, ' ');
		++cp;
		sprintf(s, "%s %s", cp, pFmt->params.algorithm_name);
		pFmt->params.algorithm_name = str_alloc_copy(s);
	}
	if ((Setup->flags & MGF_SALTED) && !Setup->SaltLen)
		return !fprintf(stderr, "Error invalid format %s\n\tIt is required to add SaltLen= to the script, for this format\n", Setup->szFORMAT_NAME);
	return 1;
}

/*
 * Load one dynamic sub-format into *pFmt.  idx < 1000 selects a built-in
 * (reserved) preload; larger values go through the script parser.  After the
 * setup call succeeds, the "$dynamic_N$" signature label is recovered from the
 * first self-test ciphertext, offsets into the hash/salt are computed, and a
 * private copy of 'curdat' is attached to the format.
 * Returns 1 on success, 0 on failure.
 */
static int LoadOneFormat(int idx, struct fmt_main *pFmt)
{
	extern struct options_main options;
	char label[16] = { 0 }, label_id[16] = { 0 }, *cp = NULL;

	memcpy(pFmt, &fmt_Dynamic, sizeof(struct fmt_main));
	// TODO:
	// NOTE, this was commented out, because the late binding @dynamic=expr@
	// hashes were killing out possibly pre-setup input buffers. NOTE, that
	// things worked fine after this, all self tests do pass, and I am 99%
	// sure that all of this 'required' cleaning happens in init(). but I am
	// putting this comment in here, so that if at a later time, there are
	// problems and are tracked down to this, we will know why.
	// dynamic_RESET(pFmt);

	// Ok we need to list this as a dynamic format (even for the 'thin' formats)
	pFmt->params.flags |= FMT_DYNAMIC;

	if (idx < 1000) {
		if (dynamic_RESERVED_PRELOAD_SETUP(idx, pFmt) != 1)
			return 0;
	}
	else {
		if (dynamic_LOAD_PARSER_FUNCTIONS(idx, pFmt) != 1)
			return 0;
	}

	/* we 'have' to take the sig from the test array. If we do not have */
	/* our preload array 'solid', then the idx will not be the proper */
	/* number. So we simply grab the label from the test cyphertext string */
	strncpy(label, pFmt->params.tests[0].ciphertext, 15);
	cp = strchr(&label[1], '$');
	if (NULL != cp)
		cp[1] = 0;   /* keep "$dynamic_N$" including the trailing '$' */
	strcpy(label_id, &label[1]);
	cp = strchr(label_id, '$');
	if (NULL != cp)
		*cp = 0;     /* label_id is "dynamic_N" without '$' delimiters */
//	if (!options.format || strncmp(options.format, "dynamic_", 8))
//		pFmt->params.label = str_alloc_copy("dynamic");
//	else
	pFmt->params.label = str_alloc_copy(label_id);
	strcpy(curdat.dynamic_WHICH_TYPE_SIG, label);
	curdat.dynamic_HASH_OFFSET = strlen(label);

	/* compute where the salt starts: signature + hash-digits + '$' separator.
	 * Hash width depends on the input encoding / digest size flags. */
	if (curdat.dynamic_base64_inout == 1 || curdat.dynamic_base64_inout == 3) {
		// we have to compute 'proper' offset
		const char *cp = pFmt->params.tests[0].ciphertext;
		size_t len = base64_valid_length(&cp[curdat.dynamic_HASH_OFFSET], curdat.dynamic_base64_inout == 1 ? e_b64_crypt : e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + len + 1;
	}
	else if (curdat.dynamic_base64_inout == 2)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 16 + 1;
	else if (curdat.dynamic_40_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 40 + 1;
	else if (curdat.dynamic_48_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 48 + 1;
	else if (curdat.dynamic_64_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 64 + 1;
	else if (curdat.dynamic_56_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 56 + 1;
	else if (curdat.dynamic_80_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 80 + 1;
	else if (curdat.dynamic_96_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 96 + 1;
	else if (curdat.dynamic_128_byte_input)
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 128 + 1;
	else
		curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 32 + 1;

	/* give this format its own private copy of curdat */
	pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
	memcpy(pFmt->private.data, &curdat, sizeof(private_subformat_data));

	/* sanity check: the signature we recovered must prefix the test hash */
	if (strncmp(curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext, strlen(curdat.dynamic_WHICH_TYPE_SIG))) {
		fprintf(stderr, "ERROR, when loading dynamic formats, the wrong curdat item was linked to this type:\nTYPE_SIG=%s\nTest_Dat=%s\n", curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext);
		return 0;
	}
	return 1;
}

/*
 * Register one late-bound (local) dynamic format, numbered from 6000 up.
 * Returns the fmt_main slot and writes the assigned number to *type.
 */
struct fmt_main *dynamic_Register_local_format(int *type)
{
	int num=nLocalFmts++;
	private_subformat_data keep;
	if (!pLocalFmts)
		pLocalFmts = mem_calloc_tiny(1000*sizeof(struct fmt_main), 16);

	/* since these are loaded LATE in the process, init() has been called
	 * and we HAVE to preserve the already loaded setup. This will happen
	 * if we run a crack, but do not specify a specific dyna format */
	memcpy(&keep, &curdat, sizeof(private_subformat_data));
	LoadOneFormat(num+6000, &(pLocalFmts[num]));
	memcpy(&curdat, &keep, sizeof(private_subformat_data));
	dynamic_use_sse = curdat.dynamic_use_sse;
	force_md5_ctx = curdat.force_md5_ctx;

	*type = num+6000;
	return &(pLocalFmts[num]);
}

/*
 * Register all valid dynamic formats (or only the single one the user asked
 * for on the command line).  *ptr receives the array; returns the count.
 */
int dynamic_Register_formats(struct fmt_main **ptr)
{
	int count, i, idx, single=-1, wildcard = 0, pop[5000];
	extern struct options_main options;

	if (options.format && strstr(options.format, "*"))
		wildcard = 1;
	Dynamic_Load_itoa16_w2();
	/* "--format=dynamic_N" or "--format=dynamic --subformat=dynamic_N"
	 * selects a single sub-format */
	if (!wildcard && options.format && !strncmp(options.format, "dynamic_", 8))
		sscanf(options.format, "dynamic_%d", &single);
	if (options.format && options.subformat && !strcmp(options.format, "dynamic") && !strncmp(options.subformat, "dynamic_", 8))
		sscanf(options.subformat, "dynamic_%d", &single);

	if (options.dynamic_bare_hashes_always_valid == 'Y')
		dynamic_allow_rawhash_fixup = 1;
	else if (options.dynamic_bare_hashes_always_valid != 'N' && cfg_get_bool(SECTION_OPTIONS, NULL, "DynamicAlwaysUseBareHashes", 1))
		dynamic_allow_rawhash_fixup = 1;

	if (single != -1) {
		// user wanted only a 'specific' format. Simply load that one.
dynamic_allow_rawhash_fixup = 1; if (dynamic_IS_VALID(single, 1) == 0) return 0; pFmts = mem_alloc_tiny(sizeof(pFmts[0]), MEM_ALIGN_WORD); if (!LoadOneFormat(single, pFmts)) return 0; *ptr = pFmts; return (nFmts = 1); } for (count = i = 0; i < 5000; ++i) { if ((pop[i] = (dynamic_IS_VALID(i, 0) == 1))) ++count; } // Ok, now we know how many formats we have. Load them pFmts = mem_alloc_tiny(sizeof(pFmts[0])*count, MEM_ALIGN_WORD); for (idx = i = 0; i < 5000; ++i) { if (pop[i]) { if (LoadOneFormat(i, &pFmts[idx]) == 0) --count; else ++idx; } } *ptr = pFmts; return (nFmts = count); } /* * finds the 'proper' sub format from the allocated formats, IFF that format 'exists' */ static struct fmt_main *dynamic_Get_fmt_main(int which) { char label[40]; int i; sprintf(label, "$dynamic_%d$", which); for (i = 0; i < nFmts; ++i) { private_subformat_data *pPriv = pFmts[i].private.data; if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label)) return &pFmts[i]; } for (i = 0; i < nLocalFmts; ++i) { private_subformat_data *pPriv = pLocalFmts[i].private.data; if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label)) return &pLocalFmts[i]; } return NULL; } /* * This function will 'forget' which md5-gen subtype we are working with. It will allow * a different type to be used. Very useful for things like -test (benchmarking). */ static void dynamic_RESET(struct fmt_main *fmt) { memset(&curdat, 0, sizeof(curdat)); m_count = 0; keys_dirty = 0; cursalt=cursalt2=username=0; saltlen=saltlen2=usernamelen=0; // make 'sure' we startout with blank inputs. m_count = 0; #ifdef SIMD_COEF_32 if (input_buf) { #else if (input_buf_X86) { #endif __nonMP_DynamicFunc__clean_input_full(); __nonMP_DynamicFunc__clean_input2_full(); } } /* * This will LINK our functions into some other fmt_main struction. That way * that struction can use our code. 
 The other *_fmt.c file will need to
 * 'override' the valid, the binary and the salt functions, and make changes
 * to the hash, BEFORE calling into the dynamic valid/binary/salt functions.
 * Other than those functions (and calling into this linkage function at init time)
 * that is about all that needs to be in that 'other' *_fmt.c file, as long as the
 * format is part of the md5-generic 'class' of functions.
 */
struct fmt_main *dynamic_THIN_FORMAT_LINK(struct fmt_main *pFmt, char *ciphertext, char *orig_sig, int bInitAlso)
{
	int i, valid, nFmtNum;
	struct fmt_main *pFmtLocal;
	static char subformat[17], *cp;

	dynamic_allow_rawhash_fixup = 0;
	/* isolate the "$dynamic_N$" prefix of the ciphertext */
	strncpy(subformat, ciphertext, 16);
	subformat[16] = 0;
	cp = strchr(&subformat[9], '$');
	if (cp)
		cp[1] = 0;
	nFmtNum = -1;
	sscanf(subformat, "$dynamic_%d", &nFmtNum);
	if (nFmtNum == -1)
		error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);
	pFmtLocal = dynamic_Get_fmt_main(nFmtNum);
	if (pFmtLocal == NULL)
		error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext);

	valid = pFmtLocal->methods.valid(ciphertext, pFmtLocal);
	if (!valid)
		error_msg("Error, trying to link to %s using ciphertext=%s FAILED\n", subformat, ciphertext);

	/* copy the dynamic format's parameters into the thin format */
	pFmt->params.algorithm_name = pFmtLocal->params.algorithm_name;
	if (pFmt->params.plaintext_length == 0 ||
		pFmt->params.plaintext_length > pFmtLocal->params.plaintext_length) {
		pFmt->params.plaintext_length = pFmtLocal->params.plaintext_length;
		pFmt->params.plaintext_min_length = pFmtLocal->params.plaintext_min_length;
	}
	pFmt->params.max_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
	/* NOTE(review): min_keys_per_crypt is seeded from the target's MAX and
	 * then clamped to 64 below — looks intentional (start large, cap), but
	 * confirm against upstream before "fixing" */
	pFmt->params.min_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt;
	if (pFmt->params.min_keys_per_crypt > 64)
		pFmt->params.min_keys_per_crypt = 64;
	pFmt->params.flags = pFmtLocal->params.flags;
	/* the thin format stores only a pointer-sized handle as its salt */
	if (pFmtLocal->params.salt_size)
		pFmt->params.salt_size = sizeof(void*);
	else
		pFmt->params.salt_size = 0;

	/* link all runtime methods to the dynamic format's implementations */
	pFmt->methods.cmp_all = pFmtLocal->methods.cmp_all;
	pFmt->methods.cmp_one = pFmtLocal->methods.cmp_one;
	pFmt->methods.cmp_exact = pFmtLocal->methods.cmp_exact;
	for (i = 0; i < FMT_TUNABLE_COSTS; ++i) {
		pFmt->methods.tunable_cost_value[i] = pFmtLocal->methods.tunable_cost_value[i];
		pFmt->params.tunable_cost_name[i] = pFmtLocal->params.tunable_cost_name[i];
	}
	pFmt->methods.source = pFmtLocal->methods.source;
	pFmt->methods.set_salt = pFmtLocal->methods.set_salt;
	pFmt->methods.salt = pFmtLocal->methods.salt;
	pFmt->methods.done = pFmtLocal->methods.done;
	pFmt->methods.salt_hash = pFmtLocal->methods.salt_hash;
	pFmt->methods.split = pFmtLocal->methods.split;
	pFmt->methods.set_key = pFmtLocal->methods.set_key;
	pFmt->methods.get_key = pFmtLocal->methods.get_key;
	pFmt->methods.clear_keys = pFmtLocal->methods.clear_keys;
	pFmt->methods.crypt_all = pFmtLocal->methods.crypt_all;
	pFmt->methods.prepare = pFmtLocal->methods.prepare;
	pFmt->methods.salt_compare = pFmtLocal->methods.salt_compare;
	for (i = 0; i < PASSWORD_HASH_SIZES; ++i) {
		pFmt->methods.binary_hash[i] = pFmtLocal->methods.binary_hash[i];
		pFmt->methods.get_hash[i] = pFmtLocal->methods.get_hash[i];
	}

	if (bInitAlso) {
		//fprintf(stderr, "dynamic_THIN_FORMAT_LINK() calling init(%s)\n", subformat);
		init(pFmtLocal);
	}

	/* the thin format gets its own copy of the dynamic private data */
	pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD);
	memcpy(pFmt->private.data, pFmtLocal->private.data, sizeof(private_subformat_data));

	return pFmtLocal;
}

// We ONLY deal with hex hashes at this time. Is we later have to deal with
// base-64, this will become harder. Before this function we had bugs where
// many things were loaded as 'being' valid, even if not.
/*
 * Return 1 if 'ciphertext' looks like a bare hex hash (no "$dynamic_N$"
 * signature) of the width this sub-format expects, optionally followed by a
 * '$'-delimited salt; 0 otherwise.
 */
static int looks_like_raw_hash(char *ciphertext, private_subformat_data *pPriv)
{
	int i, cipherTextLen = CIPHERTEXT_LENGTH;

	/* pick the expected hex width for this sub-format's digest */
	if (pPriv->dynamic_40_byte_input) {
		cipherTextLen = 40;
	} else if (pPriv->dynamic_48_byte_input) {
		cipherTextLen = 48;
	} else if (pPriv->dynamic_64_byte_input) {
		cipherTextLen = 64;
	} else if (pPriv->dynamic_56_byte_input) {
		cipherTextLen = 56;
	} else if (pPriv->dynamic_80_byte_input) {
		cipherTextLen = 80;
	} else if (pPriv->dynamic_96_byte_input) {
		cipherTextLen = 96;
	} else if (pPriv->dynamic_128_byte_input) {
		cipherTextLen = 128;
	}
	/* every character in the hash region must be a hex digit */
	for (i = 0; i < cipherTextLen; i++) {
		if (atoi16[ARCH_INDEX(ciphertext[i])] == 0x7f)
			return 0;
	}
	if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
		/* unsalted: the string must end exactly at the hash */
		if (!ciphertext[cipherTextLen])
			return 1;
		return 0;
	}
	/* salted: a '$' separator must follow the hash */
	return ciphertext[cipherTextLen] == '$';
}

/*
 * If raw-hash fixup is enabled and 'ciphertext' is a bare hash carrying all
 * the salt/username/field parts this sub-format requires, return a static
 * buffer with the "$dynamic_N$" signature prepended; otherwise return the
 * input unchanged.
 */
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv)
{
	if (!ciphertext || *ciphertext == 0 || *ciphertext == '*')
		return ciphertext;
	if (dynamic_allow_rawhash_fixup && strncmp(ciphertext, "$dynamic_", 9) && looks_like_raw_hash(ciphertext, pPriv))
	{
		static char __ciphertext[512+24];
		/* each extra component the format needs must already be present,
		 * otherwise we refuse to claim this bare hash */
		if (pPriv->pSetup->flags & MGF_SALTED) {
			if (!strchr(ciphertext, '$'))
				return ciphertext;
		}
		if ( (pPriv->pSetup->flags & MGF_SALTED2) == MGF_SALTED2) {
			if (!strstr(ciphertext, "$$2"))
				return ciphertext;
		}
		if ( (pPriv->pSetup->flags & MGF_USERNAME) == MGF_USERNAME) {
			if (!strstr(ciphertext, "$$U"))
				return ciphertext;
		}
		if (pPriv->FldMask) {
			int i;
			for (i = 0; i < 10; ++i) {
				if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
					char Fld[8];
					sprintf(Fld, "$$F%d", i);
					if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
						return ciphertext;
				}
			}
		}
		strcpy(__ciphertext, pPriv->dynamic_WHICH_TYPE_SIG);
		strnzcpy(&__ciphertext[strlen(__ciphertext)], ciphertext, 512);
		return __ciphertext;
	}
	return ciphertext;
}

/* Return 1 if 'ciphertext' already carries this format's "$dynamic_N$" signature. */
int text_in_dynamic_format_already(struct fmt_main *pFmt, char *ciphertext)
{
	private_subformat_data *pPriv;

	if (!pFmt) return 0;
	/* NOTE, it 'is' possible to get called here, without the private stuff
	   being setup properly (in valid, etc). So, we simply grab the static
	   private stuff each time */
	pPriv = pFmt->private.data;
	if (!ciphertext || !pPriv) return 0;
	return !strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG));
}

// if caseType == 1, return cp
// if caseType == 2, return upcase(cp)
// if caseType == 3, return locase(cp)
// if caseType == 4, return upcaseFirstChar(locase(cp))
// NOTE: returns a pointer to a static buffer when the case actually changes,
// so the result must be consumed before the next call.
static char *HandleCase(char *cp, int caseType)
{
	static UTF8 dest[256];

	switch(caseType) {
		case 1:
			return cp;
		case 2:
			enc_uc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
			if (!strcmp((char*)dest, cp))
				return cp;	/* unchanged: avoid returning the static buffer */
			break;
		case 3:
		case 4:
			enc_lc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
			if (caseType == 4)
				dest[0] = low2up_ansi(dest[0]);
			if (!strcmp((char*)dest, cp))
				return cp;
			break;
		default:
			return cp;
	}
	return (char*)dest;
}

/*
 * Return the salt length (always positive) of a dynamic format, or -1 when
 * the format is not dynamic or has not been fully loaded yet.
 */
int dynamic_real_salt_length(struct fmt_main *pFmt)
{
	if (pFmt->params.flags & FMT_DYNAMIC) {
		private_subformat_data *pPriv = pFmt->private.data;
		if (pPriv == NULL || pPriv->pSetup == NULL)
			return -1; // not a dynamic format, or called before we have loaded them!!
		return abs(pPriv->pSetup->SaltLen);
	}
	// NOT a dynamic format
	return -1;
}

#else
#warning Notice: Dynamic format disabled from build.
#endif /* DYNAMIC_DISABLED */
csf.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "csf.h" #include "sort.h" #include "tile.h" #include "ccp/ccp.h" #include "io.h" /****************************************************************************** * API FUNCTIONS *****************************************************************************/ int splatt_csf_load( char const * const fname, splatt_idx_t * nmodes, splatt_csf ** tensors, double const * const options) { sptensor_t * tt = tt_read(fname); if(tt == NULL) { return SPLATT_ERROR_BADINPUT; } tt_remove_empty(tt); *tensors = csf_alloc(tt, options); *nmodes = tt->nmodes; tt_free(tt); return SPLATT_SUCCESS; } int splatt_csf_convert( splatt_idx_t const nmodes, splatt_idx_t const nnz, splatt_idx_t ** const inds, splatt_val_t * const vals, splatt_csf ** tensors, double const * const options) { sptensor_t tt; tt_fill(&tt, nnz, nmodes, inds, vals); tt_remove_empty(&tt); *tensors = csf_alloc(&tt, options); return SPLATT_SUCCESS; } void splatt_free_csf( splatt_csf * tensors, double const * const options) { csf_free(tensors, options); } /****************************************************************************** * PRIVATE FUNCTIONS *****************************************************************************/ /** * @brief Find a permutation of modes that results in non-increasing mode size. * * @param dims The tensor dimensions. * @param nmodes The number of modes. * @param perm_dims The resulting permutation. */ static void p_order_dims_small( idx_t const * const dims, idx_t const nmodes, idx_t * const perm_dims) { idx_t sorted[MAX_NMODES]; idx_t matched[MAX_NMODES]; for(idx_t m=0; m < nmodes; ++m) { sorted[m] = dims[m]; matched[m] = 0; } quicksort(sorted, nmodes); /* silly n^2 comparison to grab modes from sorted dimensions. 
* TODO: make a key/val sort...*/ for(idx_t mfind=0; mfind < nmodes; ++mfind) { for(idx_t mcheck=0; mcheck < nmodes; ++mcheck) { if(sorted[mfind] == dims[mcheck] && !matched[mcheck]) { perm_dims[mfind] = mcheck; matched[mcheck] = 1; break; } } } } /** * @brief Find a permutation of modes such that the first mode is 'custom-mode' * and the remaining are naturally ordered (0, 1, ...). * * @param dims The tensor dimensions. * @param nmodes The number of modes. * @param custom_mode The mode to place first. * @param perm_dims The resulting permutation. */ static void p_order_dims_inorder( idx_t const * const dims, idx_t const nmodes, idx_t const custom_mode, idx_t * const perm_dims) { /* initialize to natural ordering */ for(idx_t m=0; m < nmodes; ++m) { perm_dims[m] = m; } /* find where custom_mode was placed and adjust from there */ for(idx_t m=0; m < nmodes; ++m) { if(perm_dims[m] == custom_mode) { memmove(perm_dims + 1, perm_dims, (m) * sizeof(m)); perm_dims[0] = custom_mode; break; } } } /** * @brief Find a permutation of modes such that the first mode is 'custom-mode' * and the remaining are sorted in non-increasing order. * * @param dims The tensor dimensions. * @param nmodes The number of modes. * @param custom_mode The mode to place first. * @param perm_dims The resulting permutation. */ static void p_order_dims_minusone( idx_t const * const dims, idx_t const nmodes, idx_t const custom_mode, idx_t * const perm_dims) { p_order_dims_small(dims, nmodes, perm_dims); /* find where custom_mode was placed and adjust from there */ for(idx_t m=0; m < nmodes; ++m) { if(perm_dims[m] == custom_mode) { memmove(perm_dims + 1, perm_dims, (m) * sizeof(m)); perm_dims[0] = custom_mode; break; } } } /** * @brief Find a permutation of modes that results in non-decreasing mode size. * * @param dims The tensor dimensions. * @param nmodes The number of modes. * @param perm_dims The resulting permutation. 
*/
static void p_order_dims_large(
  idx_t const * const dims,
  idx_t const nmodes,
  idx_t * const perm_dims)
{
  idx_t sorted[MAX_NMODES];
  idx_t matched[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    sorted[m] = dims[m];
    matched[m] = 0;
  }
  /* sort small -> large */
  quicksort(sorted, nmodes);

  /* reverse list */
  for(idx_t m=0; m < nmodes/2; ++m) {
    idx_t tmp = sorted[nmodes-m-1];
    sorted[nmodes-m-1] = sorted[m];
    sorted[m] = tmp;
  }

  /* silly n^2 comparison to grab modes from sorted dimensions.
   * TODO: make a key/val sort...*/
  for(idx_t mfind=0; mfind < nmodes; ++mfind) {
    for(idx_t mcheck=0; mcheck < nmodes; ++mcheck) {
      if(sorted[mfind] == dims[mcheck] && !matched[mcheck]) {
        perm_dims[mfind] = mcheck;
        matched[mcheck] = 1;
        break;
      }
    }
  }
}


/**
* @brief Construct the sparsity structure of the outer-mode of a CSF tensor.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
*           sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
*/
static void p_mk_outerptr(
  splatt_csf * const ct,
  sptensor_t const * const tt,
  idx_t const tile_id,
  idx_t const * const nnztile_ptr)
{
  idx_t const nnzstart = nnztile_ptr[tile_id];
  idx_t const nnzend = nnztile_ptr[tile_id+1];
  idx_t const nnz = nnzend - nnzstart;
  assert(nnzstart <= nnzend);

  /* empty tile: emit a single-entry fptr so fptr[0][0] is always readable */
  if(nnzstart == nnzend) {
    idx_t nfibs = 0;
    ct->pt[tile_id].nfibs[0] = nfibs;
    csf_sparsity * const pt = ct->pt + tile_id;
    pt->fptr[0] = splatt_malloc((nfibs + 1) * sizeof(**(pt->fptr)));
    pt->fids[0] = NULL;
    idx_t * const restrict fp = pt->fptr[0];
    fp[0] = 0;
    return;
  }

  /* the mode after accounting for dim_perm */
  idx_t const * const restrict ttind = tt->ind[ct->dim_perm[0]] + nnzstart;

  /* count fibers: one per distinct consecutive outer index */
  idx_t nfibs = 1;
  for(idx_t x=1; x < nnz; ++x) {
    assert(ttind[x-1] <= ttind[x]);
    if(ttind[x] != ttind[x-1]) {
      ++nfibs;
    }
  }
  ct->pt[tile_id].nfibs[0] = nfibs;
  assert(nfibs <= ct->dims[ct->dim_perm[0]]);

  /* grab sparsity pattern; fids can be elided when every slice is present
   * in the single tile (the fiber id is then just its position) */
  csf_sparsity * const pt = ct->pt + tile_id;
  pt->fptr[0] = splatt_malloc((nfibs+1) * sizeof(**(pt->fptr)));
  if(ct->ntiles > 1 || nfibs != ct->dims[ct->dim_perm[0]]) {
    pt->fids[0] = splatt_malloc(nfibs * sizeof(**(pt->fids)));
  } else {
    pt->fids[0] = NULL;
  }

  idx_t * const restrict fp = pt->fptr[0];
  idx_t * const restrict fi = pt->fids[0];
  fp[0] = 0;
  if(fi != NULL) {
    fi[0] = ttind[0];
  }

  idx_t nfound = 1;
  for(idx_t n=1; n < nnz; ++n) {
    /* check for end of outer index */
    if(ttind[n] != ttind[n-1]) {
      if(fi != NULL) {
        fi[nfound] = ttind[n];
      }
      fp[nfound++] = n;
    }
  }
  fp[nfibs] = nnz;
}


/**
* @brief Construct the sparsity structure of any mode but the last. The first
*        (root) mode is handled by p_mk_outerptr and the last is simply a copy
*        of the nonzeros.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
*           sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
* @param mode Which mode we are constructing.
*/
static void p_mk_fptr(
  splatt_csf * const ct,
  sptensor_t const * const tt,
  idx_t const tile_id,
  idx_t const * const nnztile_ptr,
  idx_t const mode)
{
  assert(mode < ct->nmodes);

  idx_t const nnzstart = nnztile_ptr[tile_id];
  idx_t const nnzend = nnztile_ptr[tile_id+1];
  idx_t const nnz = nnzend - nnzstart;

  /* outer mode is easy; just look at outer indices */
  if(mode == 0) {
    p_mk_outerptr(ct, tt, tile_id, nnztile_ptr);
    return;
  }

  /* the mode after accounting for dim_perm */
  idx_t const * const restrict ttind = tt->ind[ct->dim_perm[mode]] + nnzstart;

  csf_sparsity * const pt = ct->pt + tile_id;

  /* we will edit this to point to the new fiber idxs instead of nnz */
  idx_t * const restrict fprev = pt->fptr[mode-1];

  /* first count nfibers */
  idx_t nfibs = 0;
  /* foreach 'slice' in the previous dimension */
  for(idx_t s=0; s < pt->nfibs[mode-1]; ++s) {
    ++nfibs; /* one by default per 'slice' */
    /* count fibers in current hyperplane*/
    for(idx_t f=fprev[s]+1; f < fprev[s+1]; ++f) {
      if(ttind[f] != ttind[f-1]) {
        ++nfibs;
      }
    }
  }
  pt->nfibs[mode] = nfibs;

  pt->fptr[mode] = splatt_malloc((nfibs+1) * sizeof(**(pt->fptr)));
  pt->fids[mode] = splatt_malloc(nfibs * sizeof(**(pt->fids)));
  idx_t * const restrict fp = pt->fptr[mode];
  idx_t * const restrict fi = pt->fids[mode];
  fp[0] = 0;

  /* now fill in fiber info; note this pass REWRITES fprev in place so the
   * parent level points at fiber ids instead of nonzero offsets */
  idx_t nfound = 0;
  for(idx_t s=0; s < pt->nfibs[mode-1]; ++s) {
    idx_t const start = fprev[s]+1;
    idx_t const end = fprev[s+1];

    /* mark start of subtree */
    fprev[s] = nfound;
    fi[nfound] = ttind[start-1];
    fp[nfound++] = start-1;

    /* mark fibers in current hyperplane */
    for(idx_t f=start; f < end; ++f) {
      if(ttind[f] != ttind[f-1]) {
        fi[nfound] = ttind[f];
        fp[nfound++] = f;
      }
    }
  }

  /* mark end of last hyperplane */
  fprev[pt->nfibs[mode-1]] = nfibs;
  fp[nfibs] = nnz;
}


/**
* @brief Allocate and fill a CSF tensor from a coordinate tensor without
*        tiling.
*
* @param ct The CSF tensor to fill out.
* @param tt The sparse tensor to start from.
*/
static void p_csf_alloc_untiled(
  splatt_csf * const ct,
  sptensor_t * const tt)
{
  idx_t const nmodes = tt->nmodes;
  /* sort nonzeros lexicographically following dim_perm */
  tt_sort(tt, ct->dim_perm[0], ct->dim_perm);

  ct->ntiles = 1;
  for(idx_t m=0; m < nmodes; ++m) {
    ct->tile_dims[m] = 1;
  }
  ct->pt = splatt_malloc(sizeof(*(ct->pt)));

  csf_sparsity * const pt = ct->pt;

  /* last row of fptr is just nonzero inds */
  pt->nfibs[nmodes-1] = ct->nnz;
  pt->fids[nmodes-1] = splatt_malloc(ct->nnz * sizeof(**(pt->fids)));
  pt->vals = splatt_malloc(ct->nnz * sizeof(*(pt->vals)));
  memcpy(pt->fids[nmodes-1], tt->ind[ct->dim_perm[nmodes-1]],
      ct->nnz * sizeof(**(pt->fids)));
  memcpy(pt->vals, tt->vals, ct->nnz * sizeof(*(pt->vals)));

  /* setup a basic tile ptr for one tile */
  idx_t nnz_ptr[2];
  nnz_ptr[0] = 0;
  nnz_ptr[1] = tt->nnz;

  /* create fptr entries for the rest of the modes, working down from roots.
   * Skip the bottom level (nnz) */
  for(idx_t m=0; m < tt->nmodes-1; ++m) {
    p_mk_fptr(ct, tt, 0, nnz_ptr, m);
  }
}


/**
* @brief Reorder the nonzeros in a sparse tensor using dense tiling and fill
*        a CSF tensor with the data.
*
* @param ct The CSF tensor to fill.
* @param tt The sparse tensor to start from.
* @param tile_func Function pointer to a sparse tensor tiling function,
*                  e.g., tt_densetile() and tt_ccptile()
* @param splatt_opts Options array for SPLATT - used for tile dimensions.
*/
static void p_csf_alloc_tiled(
  splatt_csf * const ct,
  sptensor_t * const tt,
  idx_t * (* tile_func)(sptensor_t * const, idx_t const * const),
  double const * const splatt_opts)
{
  idx_t const nmodes = tt->nmodes;

  /* modes at or below TILEDEPTH are split into NTHREADS tiles each */
  idx_t ntiles = 1;
  for(idx_t m=0; m < ct->nmodes; ++m) {
    idx_t const depth = csf_mode_depth(m, ct->dim_perm, ct->nmodes);
    if(depth >= splatt_opts[SPLATT_OPTION_TILEDEPTH]) {
      ct->tile_dims[m] = (idx_t) splatt_opts[SPLATT_OPTION_NTHREADS];
    } else {
      ct->tile_dims[m] = 1;
    }
    ntiles *= ct->tile_dims[m];
  }

  /* perform tensor tiling */
  tt_sort(tt, ct->dim_perm[0], ct->dim_perm);
  idx_t * nnz_ptr = tile_func(tt, ct->tile_dims);

  ct->ntiles = ntiles;
  ct->pt = splatt_malloc(ntiles * sizeof(*(ct->pt)));

  for(idx_t t=0; t < ntiles; ++t) {
    idx_t const startnnz = nnz_ptr[t];
    idx_t const endnnz = nnz_ptr[t+1];
    idx_t const ptnnz = endnnz - startnnz;

    csf_sparsity * const pt = ct->pt + t;

    /* empty tile */
    if(ptnnz == 0) {
      for(idx_t m=0; m < ct->nmodes; ++m) {
        pt->fptr[m] = NULL;
        pt->fids[m] = NULL;
        pt->nfibs[m] = 0;
      }
      /* first fptr may be accessed anyway */
      pt->fptr[0] = (idx_t *) splatt_malloc(2 * sizeof(**(pt->fptr)));
      pt->fptr[0][0] = 0;
      pt->fptr[0][1] = 0;
      pt->vals = NULL;
      continue;
    }

    /* last row of fptr is just nonzero inds */
    pt->nfibs[nmodes-1] = ptnnz;
    pt->fids[nmodes-1] = splatt_malloc(ptnnz * sizeof(**(pt->fids)));
    memcpy(pt->fids[nmodes-1], tt->ind[ct->dim_perm[nmodes-1]] + startnnz,
        ptnnz * sizeof(**(pt->fids)));
    pt->vals = splatt_malloc(ptnnz * sizeof(*(pt->vals)));
    memcpy(pt->vals, tt->vals + startnnz, ptnnz * sizeof(*(pt->vals)));

    /* create fptr entries for the rest of the modes */
    for(idx_t m=0; m < tt->nmodes-1; ++m) {
      p_mk_fptr(ct, tt, t, nnz_ptr, m);
    }
  }

  free(nnz_ptr);
}


/**
* @brief Allocate and fill a CSF tensor.
*
* @param ct The CSF tensor to fill.
* @param tt The coordinate tensor to work from.
* @param mode_type The allocation scheme for the CSF tensor.
* @param mode Which mode we are converting for (if applicable).
* @param splatt_opts Used to determine tiling scheme.
*/
static void p_mk_csf(
  splatt_csf * const ct,
  sptensor_t * const tt,
  csf_mode_type mode_type,
  idx_t const mode,
  double const * const splatt_opts)
{
  ct->nnz = tt->nnz;
  ct->nmodes = tt->nmodes;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    ct->dims[m] = tt->dims[m];
  }

  /* get the indices in order */
  csf_find_mode_order(tt->dims, tt->nmodes, mode_type, mode, ct->dim_perm);

  ct->which_tile = splatt_opts[SPLATT_OPTION_TILE];
  switch(ct->which_tile) {
  case SPLATT_NOTILE:
    p_csf_alloc_untiled(ct, tt);
    break;
  case SPLATT_DENSETILE:
    p_csf_alloc_tiled(ct, tt, tt_densetile, splatt_opts);
    break;
  case SPLATT_CCPTILE:
    p_csf_alloc_tiled(ct, tt, tt_ccptile, splatt_opts);
    break;
  default:
    fprintf(stderr, "SPLATT: tiling '%d' unsupported for CSF tensors.\n",
        ct->which_tile);
    break;
  }
}


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Free all CSF representations; 'opts' determines how many were allocated. */
void csf_free(
  splatt_csf * const csf,
  double const * const opts)
{
  idx_t ntensors = 0;
  splatt_csf_type which = opts[SPLATT_OPTION_CSF_ALLOC];
  switch(which) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = csf[0].nmodes;
    break;
  }
  for(idx_t i=0; i < ntensors; ++i) {
    csf_free_mode(csf + i);
  }
  free(csf);
}

/* Free the tiles belonging to a single CSF representation (not csf itself). */
void csf_free_mode(
  splatt_csf * const csf)
{
  /* free each tile of sparsity pattern */
  for(idx_t t=0; t < csf->ntiles; ++t) {
    free(csf->pt[t].vals);
    free(csf->pt[t].fids[csf->nmodes-1]);
    for(idx_t m=0; m < csf->nmodes-1; ++m) {
      free(csf->pt[t].fptr[m]);
      free(csf->pt[t].fids[m]);
    }
  }
  free(csf->pt);
}

/* Dispatch to the mode-permutation strategy selected by 'which'. */
void csf_find_mode_order(
  idx_t const * const dims,
  idx_t const nmodes,
  csf_mode_type which,
  idx_t const mode,
  idx_t * const perm_dims)
{
  switch(which) {
  case CSF_SORTED_SMALLFIRST:
    p_order_dims_small(dims, nmodes, perm_dims);
    break;
  case CSF_SORTED_BIGFIRST:
    p_order_dims_large(dims, nmodes, perm_dims);
    break;
  case CSF_INORDER_MINUSONE:
    p_order_dims_inorder(dims, nmodes, mode, perm_dims);
    break;
  case CSF_SORTED_MINUSONE:
    p_order_dims_minusone(dims, nmodes, mode, perm_dims);
    break;
  default:
    fprintf(stderr, "SPLATT: csf_mode_type '%d' not recognized.\n", which);
    break;
  }
}

/* Estimate the total bytes consumed by all allocated CSF representations. */
size_t csf_storage(
  splatt_csf const * const tensors,
  double const * const opts)
{
  idx_t ntensors = 0;
  splatt_csf_type which_alloc = opts[SPLATT_OPTION_CSF_ALLOC];
  switch(which_alloc) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = tensors[0].nmodes;
    break;
  }

  size_t bytes = 0;
  for(idx_t m=0; m < ntensors; ++m) {
    splatt_csf const * const ct = tensors + m;
    bytes += ct->nnz * sizeof(*(ct->pt->vals)); /* vals */
    bytes += ct->nnz * sizeof(**(ct->pt->fids)); /* fids[nmodes] */
    bytes += ct->ntiles * sizeof(*(ct->pt)); /* pt */

    for(idx_t t=0; t < ct->ntiles; ++t) {
      csf_sparsity const * const pt = ct->pt + t;
      /* NOTE(review): this inner 'm' shadows the outer tensor index 'm';
       * harmless here since the outer 'm' is not used below, but worth
       * renaming at the next behavior-changing edit */
      for(idx_t m=0; m < ct->nmodes-1; ++m) {
        bytes += (pt->nfibs[m]+1) * sizeof(**(pt->fptr)); /* fptr */
        if(pt->fids[m] != NULL) {
          bytes += pt->nfibs[m] * sizeof(**(pt->fids)); /* fids */
        }
      }
    }
  }
  return bytes;
}

/* Allocate one or more CSF representations of 'tt' according to
 * SPLATT_OPTION_CSF_ALLOC.  Note: construction re-sorts 'tt' in place. */
splatt_csf * csf_alloc(
  sptensor_t * const tt,
  double const * const opts)
{
  splatt_csf * ret = NULL;

  double * tmp_opts = NULL;
  idx_t last_mode = 0;

  int tmp = 0;
  switch((splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC]) {
  case SPLATT_CSF_ONEMODE:
    ret = splatt_malloc(sizeof(*ret));
    p_mk_csf(ret, tt, CSF_SORTED_SMALLFIRST, 0, opts);
    break;

  case SPLATT_CSF_TWOMODE:
    ret = splatt_malloc(2 * sizeof(*ret));
    /* regular CSF allocation */
    p_mk_csf(ret + 0, tt, CSF_SORTED_SMALLFIRST, 0, opts);

    /* make a copy of opts and don't tile the last mode
     * TODO make this configurable?
*/ tmp_opts = splatt_default_opts(); memcpy(tmp_opts, opts, SPLATT_OPTION_NOPTIONS * sizeof(*opts)); tmp_opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE; /* allocate with no tiling for the last mode */ last_mode = ret[0].dim_perm[tt->nmodes-1]; p_mk_csf(ret + 1, tt, CSF_SORTED_MINUSONE, last_mode, tmp_opts); free(tmp_opts); break; case SPLATT_CSF_ALLMODE: ret = splatt_malloc(tt->nmodes * sizeof(*ret)); for(idx_t m=0; m < tt->nmodes; ++m) { p_mk_csf(ret + m, tt, CSF_SORTED_MINUSONE, m, opts); } break; } return ret; } void csf_alloc_mode( sptensor_t * const tt, csf_mode_type which_ordering, idx_t const mode_special, splatt_csf * const csf, double const * const opts) { p_mk_csf(csf, tt, which_ordering, mode_special, opts); } val_t csf_frobsq( splatt_csf const * const tensor) { /* accumulate into double to help with some precision loss */ double norm = 0; #pragma omp parallel reduction(+:norm) { for(idx_t t=0; t < tensor->ntiles; ++t) { val_t const * const vals = tensor->pt[t].vals; if(vals == NULL) { continue; } idx_t const nnz = tensor->pt[t].nfibs[tensor->nmodes-1]; #pragma omp for schedule(static) nowait for(idx_t n=0; n < nnz; ++n) { norm += vals[n] * vals[n]; } } } /* end omp parallel */ return (val_t) norm; } idx_t * csf_partition_1d( splatt_csf const * const csf, idx_t const tile_id, idx_t const nparts) { idx_t * parts = splatt_malloc((nparts+1) * sizeof(*parts)); idx_t const nslices = csf->pt[tile_id].nfibs[0]; idx_t * weights = splatt_malloc(nslices * sizeof(*weights)); #pragma omp parallel for for(idx_t i=0; i < nslices; ++i) { weights[i] = csf_count_nnz(csf->pt[tile_id].fptr, csf->nmodes, 0, i); } partition_1d(weights, nslices, parts, nparts); splatt_free(weights); return parts; } idx_t * csf_partition_tiles_1d( splatt_csf const * const csf, idx_t const nparts) { idx_t * parts = splatt_malloc((nparts+1) * sizeof(*parts)); idx_t const nmodes = csf->nmodes; idx_t const ntiles = csf->ntiles; idx_t * weights = splatt_malloc(ntiles * sizeof(*weights)); #pragma omp 
parallel for for(idx_t i=0; i < ntiles; ++i) { weights[i] = csf->pt->nfibs[nmodes-1]; } partition_1d(weights, ntiles, parts, nparts); splatt_free(weights); return parts; } idx_t csf_count_nnz( idx_t * * fptr, idx_t const nmodes, idx_t depth, idx_t const fiber) { if(depth == nmodes-1) { return 1; } idx_t left = fptr[depth][fiber]; idx_t right = fptr[depth][fiber+1]; ++depth; for(; depth < nmodes-1; ++depth) { left = fptr[depth][left]; right = fptr[depth][right]; } return right - left; }
prepress.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS % % P P R R E P P R R E SS SS % % PPPP RRRR EEE PPPP RRRR EEE SSS SSS % % P R R E P R R E SS SS % % P R R EEEEE P R R EEEEE SSSSS SSSSS % % % % % % MagickCore Prepress Methods % % % % Software Design % % John Cristy % % October 2001 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/prepress.h" #include "magick/registry.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/string_.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T o t a l I n k D e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageTotalInkDensity() returns the total ink density for a CMYK image. % Total Ink Density (TID) is determined by adding the CMYK values in the % darkest shadow area in an image. 
%
%  The format of the GetImageTotalInkDensity method is:
%
%      double GetImageTotalInkDensity(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport double GetImageTotalInkDensity(Image *image)
{
  CacheView
    *image_view;

  double
    total_ink_density;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ColorSeparatedImageRequired","`%s'",image->filename);
      return(0.0);
    }
  status=MagickTrue;
  total_ink_density=0.0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      density,
      row_density;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    /*
      Fold each row into a thread-local maximum first; the shared accumulator
      is then updated once per row under the critical section.  The original
      code read total_ink_density outside the critical section (an
      unsynchronized read of a shared double - a data race); this form removes
      that race and also reduces critical-section contention.
    */
    row_density=0.0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        For CMYK pixels, C/M/Y occupy the red/green/blue channels and K is in
        the index channel.
      */
      density=(double) GetPixelRed(p)+GetPixelGreen(p)+GetPixelBlue(p)+
        GetPixelIndex(indexes+x);
      if (density > row_density)
        row_density=density;
      p++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
    if (row_density > total_ink_density)
      total_ink_density=row_density;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    total_ink_density=0.0;
  return(total_ink_density);
}
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; INIT(); // // Test: Single. // for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; TEST({ for (int i = 0; i < N; i++) { A[i] = 1; B[i] = 0; } _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])") { _Pragma("omp for nowait schedule(static,1)") for (int i = 0; i < N; i++) { B[i] = D[i] - E[i]; } _Pragma("omp single") { for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; } } _Pragma("omp for schedule(static,1)") for (int i = 0; i < N; i++) { B[i] += A[i]; } } }, VERIFY(0, N, B[i], 3*i+2)); } // // Test: Single - private, nowait. // for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; TEST({ for (int i = 0; i < N; i++) { A[i] = 1; B[i] = 0; } _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])") { _Pragma("omp for nowait schedule(static,1)") for (int i = 0; i < N; i++) { B[i] = D[i] - E[i]; } _Pragma("omp single nowait private(A)") { for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; } } _Pragma("omp for schedule(static,1)") for (int i = 0; i < N; i++) { B[i] += A[i]; } } }, VERIFY(0, N, B[i], 2*i+1)); } // // Test: Single - firstprivate, should not have nowait. 
// for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; TEST({ for (int i = 0; i < N; i++) { A[i] = 1; B[i] = 0; } _Pragma("omp parallel if(threads[0] > 1) num_threads(threads[0])") { _Pragma("omp for schedule(static,1)") for (int i = 0; i < N; i++) { B[i] = D[i] - E[i]; } _Pragma("omp single firstprivate(A)") { for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; B[i] += A[i]; } } _Pragma("omp for schedule(static,1)") for (int i = 0; i < N; i++) { B[i] += A[i]; } } }, VERIFY(0, N, B[i], 3*i+3)); } #if 0 // // Test: Single - copyprivate, should not have nowait. // for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; TEST({ for (int i = 0; i < N; i++) { A[i] = 1; B[i] = 0; } _Pragma("omp parallel private(A) if(threads[0] > 1) num_threads(threads[0])") { _Pragma("omp for schedule(static,1)") for (int i = 0; i < N; i++) { B[i] = D[i] - E[i]; A[i] = i; } _Pragma("omp single copyprivate(A)") { for (int i = 0; i < N; i++) { A[i] += C[i] + D[i]; B[i] += A[i]; } } } for (int i = 0; i < N; i++) { B[i] += A[i]; } }, VERIFY(0, N, B[i], 4*i+3)); } #endif return 0; }
pcgetrf.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzgetrf.c, normal z -> c, Fri Sep 28 17:38:11 2018
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>

#define A(m, n) (plasma_complex32_t*)plasma_tile_addr(A, m, n)

/******************************************************************************/
/*
 * Parallel tile LU factorization with partial pivoting (complex single
 * precision).  Builds an OpenMP task DAG: for each diagonal step k, a panel
 * factorization task followed by per-column update tasks (row swap + trsm +
 * gemm), then left-looking pivot application.  Dummy tasks are inserted to
 * stitch tile-granularity dependences to panel-granularity ones.
 *
 * @param A        Descriptor of the tiled matrix to factor (in place).
 * @param ipiv     Output pivot indices (1-based, global after adjustment).
 * @param sequence Sequence the request belongs to; checked/failed on error.
 * @param request  Request handle used to report failures.
 */
void plasma_pcgetrf(plasma_desc_t A, int *ipiv,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    // Read parameters from the context.
    plasma_context_t *plasma = plasma_context_self();

    // Set tiling parameters.
    int ib = plasma->ib;

    int minmtnt = imin(A.mt, A.nt);
    for (int k = 0; k < minmtnt; k++) {
        plasma_complex32_t *a00, *a20;
        a00 = A(k, k);
        a20 = A(A.mt-1, k);

        // Create fake dependencies of the whole panel on its individual tiles.
        // These tasks are inserted to generate a correct DAG rather than
        // doing any useful work.
        for (int m = k+1; m < A.mt-1; m++) {
            plasma_complex32_t *amk = A(m, k);
            #pragma omp task depend (in:amk[0]) \
                             depend (inout:a00[0]) \
                             priority(1)
            {
                // Do some funny work here. It appears so that the compiler
                // might not insert the task if it is completely empty.
                int l = 1;
                l++;
            }
        }

        int ma00k = (A.mt-k-1)*A.mb;
        int na00k = plasma_tile_nmain(A, k);
        int lda20 = plasma_tile_mmain(A, A.mt-1);

        int nvak = plasma_tile_nview(A, k);
        int mvak = plasma_tile_mview(A, k);
        int ldak = plasma_tile_mmain(A, k);

        int num_panel_threads = imin(plasma->max_panel_threads,
                                     minmtnt-k);

        // panel: factor column block k with num_panel_threads cooperating
        // tasks synchronized through a shared barrier.
        #pragma omp task depend(inout:a00[0:ma00k*na00k]) \
                         depend(inout:a20[0:lda20*nvak]) \
                         depend(out:ipiv[k*A.mb:mvak]) \
                         priority(1)
        {
            // Scratch arrays for the cooperative pivot search; volatile
            // because multiple panel tasks read/write them concurrently.
            volatile int *max_idx =
                (int*)malloc(num_panel_threads*sizeof(int));
            if (max_idx == NULL)
                plasma_request_fail(sequence, request,
                                    PlasmaErrorOutOfMemory);

            volatile plasma_complex32_t *max_val =
                (plasma_complex32_t*)malloc(num_panel_threads*sizeof(
                    plasma_complex32_t));
            if (max_val == NULL)
                plasma_request_fail(sequence, request,
                                    PlasmaErrorOutOfMemory);

            volatile int info = 0;

            plasma_barrier_t barrier;
            plasma_barrier_init(&barrier);

            if (sequence->status == PlasmaSuccess) {
                // If nesting would not be expensive on architectures such as
                // KNL, this would resolve the issue with deadlocks caused by
                // tasks expected to run are in fact not launched.
                //#pragma omp parallel for shared(barrier)
                //                         schedule(dynamic,1)
                //                         num_threads(num_panel_threads)
                #pragma omp taskloop untied shared(barrier) \
                                     num_tasks(num_panel_threads) \
                                     priority(2)
                for (int rank = 0; rank < num_panel_threads; rank++) {
                    {
                        plasma_desc_t view =
                            plasma_desc_view(A,
                                             k*A.mb, k*A.nb,
                                             A.m-k*A.mb, nvak);

                        plasma_core_cgetrf(view, &ipiv[k*A.mb], ib,
                                           rank, num_panel_threads,
                                           max_idx, max_val, &info,
                                           &barrier);

                        if (info != 0)
                            plasma_request_fail(sequence, request,
                                                k*A.mb+info);
                    }
                }
            }
            #pragma omp taskwait
            free((void*)max_idx);
            free((void*)max_val);

            // Convert panel-local pivot indices to global row indices.
            for (int i = k*A.mb+1; i <= imin(A.m, k*A.mb+nvak); i++)
                ipiv[i-1] += k*A.mb;
        }

        // update: for each trailing column block, apply row swaps, solve the
        // triangular system, and update the trailing submatrix.
        for (int n = k+1; n < A.nt; n++) {
            plasma_complex32_t *a01, *a11, *a21;
            a01 = A(k, n);
            a11 = A(k+1, n);
            a21 = A(A.mt-1, n);

            int ma11k = (A.mt-k-2)*A.mb;
            int na11n = plasma_tile_nmain(A, n);
            int lda21 = plasma_tile_mmain(A, A.mt-1);
            int nvan = plasma_tile_nview(A, n);

            // Column k+1 is the next panel: boost its priority.
            #pragma omp task depend(in:a00[0:ma00k*na00k]) \
                             depend(in:a20[0:lda20*nvak]) \
                             depend(in:ipiv[k*A.mb:mvak]) \
                             depend(inout:a01[0:ldak*nvan]) \
                             depend(inout:a11[0:ma11k*na11n]) \
                             depend(inout:a21[0:lda21*nvan]) \
                             priority(n == k+1)
            {
                if (sequence->status == PlasmaSuccess) {
                    // geswp
                    int k1 = k*A.mb+1;
                    int k2 = imin(k*A.mb+A.mb, A.m);
                    plasma_desc_t view =
                        plasma_desc_view(A, 0, n*A.nb, A.m, nvan);
                    plasma_core_cgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);

                    // trsm
                    plasma_core_ctrsm(PlasmaLeft, PlasmaLower,
                                      PlasmaNoTrans, PlasmaUnit,
                                      mvak, nvan,
                                      1.0, A(k, k), ldak,
                                           A(k, n), ldak);

                    // gemm: rank-nb update of the trailing tiles in column n.
                    for (int m = k+1; m < A.mt; m++) {
                        int mvam = plasma_tile_mview(A, m);
                        int ldam = plasma_tile_mmain(A, m);

                        #pragma omp task priority(n == k+1)
                        {
                            plasma_core_cgemm(
                                PlasmaNoTrans, PlasmaNoTrans,
                                mvam, nvan, A.nb,
                                -1.0, A(m, k), ldam,
                                      A(k, n), ldak,
                                1.0,  A(m, n), ldam);
                        }
                    }
                }
                #pragma omp taskwait
            }
        }
    }

    // Multidependency of the whole ipiv on the individual chunks
    // corresponding to tiles.
    for (int m = 0; m < minmtnt; m++) {
        // insert dummy task
        #pragma omp task depend (in:ipiv[m*A.mb]) \
                         depend (inout:ipiv[0])
        {
            int l = 1;
            l++;
        }
    }

    // pivoting to the left: apply later pivots back to the already-factored
    // columns so L is consistently permuted.
    for (int k = 0; k < minmtnt-1; k++) {
        plasma_complex32_t *a10, *a20;
        a10 = A(k+1, k);
        a20 = A(A.mt-1, k);

        int ma10k = (A.mt-k-2)*A.mb;
        int na00k = plasma_tile_nmain(A, k);
        int lda20 = plasma_tile_mmain(A, A.mt-1);
        int nvak = plasma_tile_nview(A, k);

        #pragma omp task depend(in:ipiv[0:imin(A.m,A.n)]) \
                         depend(inout:a10[0:ma10k*na00k]) \
                         depend(inout:a20[0:lda20*nvak])
        {
            if (sequence->status == PlasmaSuccess) {
                plasma_desc_t view =
                    plasma_desc_view(A, 0, k*A.nb, A.m, A.nb);
                int k1 = (k+1)*A.mb+1;
                int k2 = imin(A.m, A.n);
                plasma_core_cgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);
            }
        }

        // Multidependency of individual tiles on the whole panel.
        for (int m = k+2; m < A.mt-1; m++) {
            plasma_complex32_t *amk = A(m, k);
            #pragma omp task depend (in:a10[0]) \
                             depend (inout:amk[0])
            {
                // Do some funny work here. It appears so that the compiler
                // might not insert the task if it is completely empty.
                int l = 1;
                l++;
            }
        }
    }
}
rhs.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB SP code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include <math.h>
#include "header.h"

// Compute the right-hand side of the SP solver: reciprocal density and
// derived fields, forcing copy, xi/eta/zeta flux differences, fourth-order
// dissipation, and the final dt scaling.
//
// NOTE(review): every worksharing loop starts at kN+1 where k1..k11 are
// restart markers added by the "//kai" instrumentation.  Their declarations
// are commented out below, so they are presumably globals declared in
// header.h for the consistent_data checkpointing scheme -- confirm.  For
// normal (non-restart) behavior they are expected to start at 0 (k8 at 2);
// each loop resets its marker from inside the parallel loop body, which is a
// multi-thread write to a shared variable -- presumably intentional for the
// instrumentation, but verify.
void compute_rhs()
{
  int i, j, k, m;
  double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;

  /*
  //kai
  int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11;
  consistent_data(&k1, "int", 1);
  consistent_data(&k2, "int", 1);
  consistent_data(&k3, "int", 1);
  consistent_data(&k4, "int", 1);
  consistent_data(&k5, "int", 1);
  consistent_data(&k6, "int", 1);
  consistent_data(&k7, "int", 1);
  consistent_data(&k8, "int", 1);
  consistent_data(&k9, "int", 1);
  consistent_data(&k10, "int", 1);
  consistent_data(&k11, "int", 1);
  */

  if (timeron) timer_start(t_rhs);
  #pragma omp parallel default(shared) private(i,j,k,m,rho_inv,aux,uijk, \
      up1,um1,vijk,vp1,vm1,wijk,wp1,wm1)
  {
  //---------------------------------------------------------------------
  // compute the reciprocal of density, and the kinetic energy,
  // and the speed of sound.
  //---------------------------------------------------------------------
  #pragma omp for schedule(static) nowait
  for (k = k1+1; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        rho_inv = 1.0/u[k][j][i][0];
        rho_i[k][j][i] = rho_inv;
        us[k][j][i] = u[k][j][i][1] * rho_inv;
        vs[k][j][i] = u[k][j][i][2] * rho_inv;
        ws[k][j][i] = u[k][j][i][3] * rho_inv;
        square[k][j][i] = 0.5* (
            u[k][j][i][1]*u[k][j][i][1] +
            u[k][j][i][2]*u[k][j][i][2] +
            u[k][j][i][3]*u[k][j][i][3] ) * rho_inv;
        qs[k][j][i] = square[k][j][i] * rho_inv;
        //-------------------------------------------------------------------
        // (don't need speed and ainx until the lhs computation)
        //-------------------------------------------------------------------
        aux = c1c2*rho_inv* (u[k][j][i][4] - square[k][j][i]);
        speed[k][j][i] = sqrt(aux);
      }
    }
    //kai
    k1 = 0;
  }

  //---------------------------------------------------------------------
  // copy the exact forcing term to the right hand side;  because
  // this forcing term is known, we can store it on the whole grid
  // including the boundary
  //---------------------------------------------------------------------
  #pragma omp for schedule(static)
  for (k = k2+1; k <= nz2+1; k++) {
    for (j = 0; j <= ny2+1; j++) {
      for (i = 0; i <= nx2+1; i++) {
        for (m = 0; m < 5; m++) {
          rhs[k][j][i][m] = forcing[k][j][i][m];
        }
      }
    }
    //kai
    k2 = 0;
  }

  //---------------------------------------------------------------------
  // compute xi-direction fluxes
  //---------------------------------------------------------------------
  #pragma omp master
  if (timeron) timer_start(t_rhsx);

  #pragma omp for schedule(static) nowait
  for (k = k3+1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
      for (i = 1; i <= nx2; i++) {
        uijk = us[k][j][i];
        up1  = us[k][j][i+1];
        um1  = us[k][j][i-1];

        rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 *
          (u[k][j][i+1][0] - 2.0*u[k][j][i][0] + u[k][j][i-1][0]) -
          tx2 * (u[k][j][i+1][1] - u[k][j][i-1][1]);

        rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 *
          (u[k][j][i+1][1] - 2.0*u[k][j][i][1] + u[k][j][i-1][1]) +
          xxcon2*con43 * (up1 - 2.0*uijk + um1) -
          tx2 * (u[k][j][i+1][1]*up1 - u[k][j][i-1][1]*um1 +
                (u[k][j][i+1][4] - square[k][j][i+1] -
                 u[k][j][i-1][4] + square[k][j][i-1]) * c2);

        rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 *
          (u[k][j][i+1][2] - 2.0*u[k][j][i][2] + u[k][j][i-1][2]) +
          xxcon2 * (vs[k][j][i+1] - 2.0*vs[k][j][i] + vs[k][j][i-1]) -
          tx2 * (u[k][j][i+1][2]*up1 - u[k][j][i-1][2]*um1);

        rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 *
          (u[k][j][i+1][3] - 2.0*u[k][j][i][3] + u[k][j][i-1][3]) +
          xxcon2 * (ws[k][j][i+1] - 2.0*ws[k][j][i] + ws[k][j][i-1]) -
          tx2 * (u[k][j][i+1][3]*up1 - u[k][j][i-1][3]*um1);

        rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 *
          (u[k][j][i+1][4] - 2.0*u[k][j][i][4] + u[k][j][i-1][4]) +
          xxcon3 * (qs[k][j][i+1] - 2.0*qs[k][j][i] + qs[k][j][i-1]) +
          xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) +
          xxcon5 * (u[k][j][i+1][4]*rho_i[k][j][i+1] -
                    2.0*u[k][j][i][4]*rho_i[k][j][i] +
                    u[k][j][i-1][4]*rho_i[k][j][i-1]) -
          tx2 * ( (c1*u[k][j][i+1][4] - c2*square[k][j][i+1])*up1 -
                  (c1*u[k][j][i-1][4] - c2*square[k][j][i-1])*um1 );
      }
    }

    //---------------------------------------------------------------------
    // add fourth order xi-direction dissipation
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      i = 1;
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m]- dssp *
          (5.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] + u[k][j][i+2][m]);
      }

      i = 2;
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          (-4.0*u[k][j][i-1][m] + 6.0*u[k][j][i][m] -
            4.0*u[k][j][i+1][m] + u[k][j][i+2][m]);
      }
    }

    for (j = 1; j <= ny2; j++) {
      for (i = 3; i <= nx2-2; i++) {
        for (m = 0; m < 5; m++) {
          rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
            ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] +
              6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] +
              u[k][j][i+2][m] );
        }
      }
    }

    for (j = 1; j <= ny2; j++) {
      i = nx2-1;
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] +
            6.0*u[k][j][i][m] - 4.0*u[k][j][i+1][m] );
      }

      i = nx2;
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          ( u[k][j][i-2][m] - 4.0*u[k][j][i-1][m] + 5.0*u[k][j][i][m] );
      }
    }
    //kai
    k3 = 0;
  }

  #pragma omp master
  {
  if (timeron) timer_stop(t_rhsx);

  //---------------------------------------------------------------------
  // compute eta-direction fluxes
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_rhsy);
  }

  #pragma omp for schedule(static)
  for (k = k4+1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
      for (i = 1; i <= nx2; i++) {
        vijk = vs[k][j][i];
        vp1  = vs[k][j+1][i];
        vm1  = vs[k][j-1][i];

        rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 *
          (u[k][j+1][i][0] - 2.0*u[k][j][i][0] + u[k][j-1][i][0]) -
          ty2 * (u[k][j+1][i][2] - u[k][j-1][i][2]);

        rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 *
          (u[k][j+1][i][1] - 2.0*u[k][j][i][1] + u[k][j-1][i][1]) +
          yycon2 * (us[k][j+1][i] - 2.0*us[k][j][i] + us[k][j-1][i]) -
          ty2 * (u[k][j+1][i][1]*vp1 - u[k][j-1][i][1]*vm1);

        rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 *
          (u[k][j+1][i][2] - 2.0*u[k][j][i][2] + u[k][j-1][i][2]) +
          yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
          ty2 * (u[k][j+1][i][2]*vp1 - u[k][j-1][i][2]*vm1 +
                (u[k][j+1][i][4] - square[k][j+1][i] -
                 u[k][j-1][i][4] + square[k][j-1][i]) * c2);

        rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 *
          (u[k][j+1][i][3] - 2.0*u[k][j][i][3] + u[k][j-1][i][3]) +
          yycon2 * (ws[k][j+1][i] - 2.0*ws[k][j][i] + ws[k][j-1][i]) -
          ty2 * (u[k][j+1][i][3]*vp1 - u[k][j-1][i][3]*vm1);

        rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 *
          (u[k][j+1][i][4] - 2.0*u[k][j][i][4] + u[k][j-1][i][4]) +
          yycon3 * (qs[k][j+1][i] - 2.0*qs[k][j][i] + qs[k][j-1][i]) +
          yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) +
          yycon5 * (u[k][j+1][i][4]*rho_i[k][j+1][i] -
                    2.0*u[k][j][i][4]*rho_i[k][j][i] +
                    u[k][j-1][i][4]*rho_i[k][j-1][i]) -
          ty2 * ((c1*u[k][j+1][i][4] - c2*square[k][j+1][i]) * vp1 -
                 (c1*u[k][j-1][i][4] - c2*square[k][j-1][i]) * vm1);
      }
    }

    //---------------------------------------------------------------------
    // add fourth order eta-direction dissipation
    //---------------------------------------------------------------------
    j = 1;
    for (i = 1; i <= nx2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m]- dssp *
          ( 5.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] + u[k][j+2][i][m]);
      }
    }

    j = 2;
    for (i = 1; i <= nx2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          (-4.0*u[k][j-1][i][m] + 6.0*u[k][j][i][m] -
            4.0*u[k][j+1][i][m] + u[k][j+2][i][m]);
      }
    }

    for (j = 3; j <= ny2-2; j++) {
      for (i = 1; i <= nx2; i++) {
        for (m = 0; m < 5; m++) {
          rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
            ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] +
              6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] +
              u[k][j+2][i][m] );
        }
      }
    }

    j = ny2-1;
    for (i = 1; i <= nx2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] +
            6.0*u[k][j][i][m] - 4.0*u[k][j+1][i][m] );
      }
    }

    j = ny2;
    for (i = 1; i <= nx2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          ( u[k][j-2][i][m] - 4.0*u[k][j-1][i][m] + 5.0*u[k][j][i][m] );
      }
    }
    //kai
    k4 = 0;
  }

  #pragma omp master
  {
  if (timeron) timer_stop(t_rhsy);

  //---------------------------------------------------------------------
  // compute zeta-direction fluxes
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_rhsz);
  }

  #pragma omp for schedule(static)
  for (k = k5+1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        wijk = ws[k][j][i];
        wp1  = ws[k+1][j][i];
        wm1  = ws[k-1][j][i];

        rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 *
          (u[k+1][j][i][0] - 2.0*u[k][j][i][0] + u[k-1][j][i][0]) -
          tz2 * (u[k+1][j][i][3] - u[k-1][j][i][3]);

        rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 *
          (u[k+1][j][i][1] - 2.0*u[k][j][i][1] + u[k-1][j][i][1]) +
          zzcon2 * (us[k+1][j][i] - 2.0*us[k][j][i] + us[k-1][j][i]) -
          tz2 * (u[k+1][j][i][1]*wp1 - u[k-1][j][i][1]*wm1);

        rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 *
          (u[k+1][j][i][2] - 2.0*u[k][j][i][2] + u[k-1][j][i][2]) +
          zzcon2 * (vs[k+1][j][i] - 2.0*vs[k][j][i] + vs[k-1][j][i]) -
          tz2 * (u[k+1][j][i][2]*wp1 - u[k-1][j][i][2]*wm1);

        rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 *
          (u[k+1][j][i][3] - 2.0*u[k][j][i][3] + u[k-1][j][i][3]) +
          zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
          tz2 * (u[k+1][j][i][3]*wp1 - u[k-1][j][i][3]*wm1 +
                (u[k+1][j][i][4] - square[k+1][j][i] -
                 u[k-1][j][i][4] + square[k-1][j][i]) * c2);

        rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 *
          (u[k+1][j][i][4] - 2.0*u[k][j][i][4] + u[k-1][j][i][4]) +
          zzcon3 * (qs[k+1][j][i] - 2.0*qs[k][j][i] + qs[k-1][j][i]) +
          zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) +
          zzcon5 * (u[k+1][j][i][4]*rho_i[k+1][j][i] -
                    2.0*u[k][j][i][4]*rho_i[k][j][i] +
                    u[k-1][j][i][4]*rho_i[k-1][j][i]) -
          tz2 * ((c1*u[k+1][j][i][4] - c2*square[k+1][j][i])*wp1 -
                 (c1*u[k-1][j][i][4] - c2*square[k-1][j][i])*wm1);
      }
    }
    //kai
    k5 = 0;
  }

  //---------------------------------------------------------------------
  // add fourth order zeta-direction dissipation
  //---------------------------------------------------------------------
  k = 1;
  #pragma omp for schedule(static) nowait
  for (j = k6+1; j <= grid_points[1]-2; j++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m]- dssp *
          (5.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] + u[k+2][j][i][m]);
      }
    }
    //kai
    k6 = 0;
  }

  k = 2;
  #pragma omp for schedule(static) nowait
  for (j = k7+1; j <= grid_points[1]-2; j++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          (-4.0*u[k-1][j][i][m] + 6.0*u[k][j][i][m] -
            4.0*u[k+1][j][i][m] + u[k+2][j][i][m]);
      }
    }
    //kai
    k7 = 0;
  }

  // NOTE(review): this marker resets to 2 (not 0) because the interior
  // zeta-dissipation loop starts at k = 3.
  #pragma omp for schedule(static) nowait
  for (k = k8+1; k <= grid_points[2]-4; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (m = 0; m < 5; m++) {
          rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
            ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] +
              6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] +
              u[k+2][j][i][m] );
        }
      }
    }
    //kai
    k8 = 2;
  }

  k = grid_points[2]-3;
  #pragma omp for schedule(static) nowait
  for (j = k9+1; j <= grid_points[1]-2; j++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] +
            6.0*u[k][j][i][m] - 4.0*u[k+1][j][i][m] );
      }
    }
    //kai
    k9 = 0;
  }

  k = grid_points[2]-2;
  #pragma omp for schedule(static)
  for (j = k10+1; j <= grid_points[1]-2; j++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (m = 0; m < 5; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
          ( u[k-2][j][i][m] - 4.0*u[k-1][j][i][m] + 5.0*u[k][j][i][m] );
      }
    }
    //kai
    k10 = 0;
  }

  #pragma omp master
  if (timeron) timer_stop(t_rhsz);

  // Final scaling of the RHS by the time step.
  #pragma omp for schedule(static) nowait
  for (k = k11+1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
      for (i = 1; i <= nx2; i++) {
        for (m = 0; m < 5; m++) {
          rhs[k][j][i][m] = rhs[k][j][i][m] * dt;
        }
      }
    }
    //kai
    k11 = 0;
  }
  } //end parallel
  if (timeron) timer_stop(t_rhs);
}
simd_utils_avx512_int32.h
/* * Project : SIMD_Utils * Version : 0.1.12 * Author : JishinMaster * Licence : BSD-2 */ #pragma once #include <stdint.h> #include "immintrin.h" static inline void add512s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / AVX512_LEN_INT32; stop_len *= AVX512_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_store_si512(dst + i, _mm512_add_epi32(_mm512_load_si512(src1 + i), _mm512_load_si512(src2 + i))); } } else { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_storeu_si512(dst + i, _mm512_add_epi32(_mm512_loadu_si512(src1 + i), _mm512_loadu_si512(src2 + i))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] + src2[i]; } } static inline void mul512s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / AVX512_LEN_INT32; stop_len *= AVX512_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_store_si512(dst + i, _mm512_mul_epi32(_mm512_load_si512(src1 + i), _mm512_load_si512(src2 + i))); } } else { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_storeu_si512(dst + i, _mm512_mul_epi32(_mm512_loadu_si512(src1 + i), _mm512_loadu_si512(src2 + i))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] * src2[i]; } } static inline void sub512s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / AVX512_LEN_INT32; stop_len *= AVX512_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_store_si512(dst + i, _mm512_sub_epi32(_mm512_load_si512(src1 + i), _mm512_load_si512(src2 + i))); } } else { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_storeu_si512(dst + i, 
_mm512_sub_epi32(_mm512_loadu_si512(src1 + i), _mm512_loadu_si512(src2 + i))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] - src2[i]; } } static inline void addc512s(int32_t *src, int32_t value, int32_t *dst, int len) { int stop_len = len / AVX512_LEN_INT32; stop_len *= AVX512_LEN_INT32; const v16si tmp = _mm512_set1_epi32(value); if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX512_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_store_si512(dst + i, _mm512_add_epi32(tmp, _mm512_load_si512(src + i))); } } else { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_storeu_si512(dst + i, _mm512_add_epi32(tmp, _mm512_loadu_si512(src + i))); } } for (int i = stop_len; i < len; i++) { dst[i] = src[i] + value; } } // Experimental static inline void copy512s(int32_t *src, int32_t *dst, int len) { int stop_len = len / AVX512_LEN_INT32; stop_len *= AVX512_LEN_INT32; #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { _mm512_store_si512((__m512i *) (dst + i), _mm512_load_si512((__m512i *) (src + i))); } for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void copy512s_2(int32_t *src, int32_t *dst, int len) { int stop_len = len / (2 * AVX512_LEN_INT32); stop_len *= (2 * AVX512_LEN_INT32); #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += 2 * AVX512_LEN_INT32) { __m512i tmp1 = _mm512_load_si512((__m512i *) (src + i)); __m512i tmp2 = _mm512_load_si512((__m512i *) (src + i + AVX512_LEN_INT32)); _mm512_store_si512((__m512i *) (dst + i), tmp1); _mm512_store_si512((__m512i *) (dst + i + AVX512_LEN_INT32), tmp2); } for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void fast_copy512s(int32_t *src, int32_t *dst, int len) { int stop_len = len / AVX512_LEN_INT32; stop_len *= AVX512_LEN_INT32; #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; 
i += AVX512_LEN_INT32) { _mm512_stream_si512((__m512i *) (dst + i), _mm512_stream_load_si512((__m512i *) (src + i))); } _mm_mfence(); for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void fast_copy512s_2(int32_t *src, int32_t *dst, int len) { int stop_len = len / (2 * AVX512_LEN_INT32); stop_len *= (2 * AVX512_LEN_INT32); #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += 2 * AVX512_LEN_INT32) { __m512i tmp1 = _mm512_stream_load_si512((__m512i *) (src + i)); __m512i tmp2 = _mm512_stream_load_si512((__m512i *) (src + i + AVX512_LEN_INT32)); _mm512_stream_si512((__m512i *) (dst + i), tmp1); _mm512_stream_si512((__m512i *) (dst + i + AVX512_LEN_INT32), tmp2); } _mm_mfence(); for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline void fast_copy512s_4(int32_t *src, int32_t *dst, int len) { int stop_len = len / (4 * AVX512_LEN_INT32); stop_len *= (4 * AVX512_LEN_INT32); #ifdef OMP #pragma omp parallel for schedule(auto) #endif for (int i = 0; i < stop_len; i += 4 * AVX512_LEN_INT32) { __m512i tmp1 = _mm512_stream_load_si512((__m512i *) (src + i)); __m512i tmp2 = _mm512_stream_load_si512((__m512i *) (src + i + AVX512_LEN_INT32)); __m512i tmp3 = _mm512_stream_load_si512((__m512i *) (src + i + 2 * AVX512_LEN_INT32)); __m512i tmp4 = _mm512_stream_load_si512((__m512i *) (src + i + 3 * AVX512_LEN_INT32)); _mm512_stream_si512((__m512i *) (dst + i), tmp1); _mm512_stream_si512((__m512i *) (dst + i + AVX512_LEN_INT32), tmp2); _mm512_stream_si512((__m512i *) (dst + i + 2 * AVX512_LEN_INT32), tmp3); _mm512_stream_si512((__m512i *) (dst + i + 3 * AVX512_LEN_INT32), tmp4); } _mm_mfence(); for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } //to be improved? 
static inline __m512i _mm512_absdiff_epi16(__m512i a, __m512i b) { __m512i cmp, difab, difba; __m512i zero = _mm512_setzero_epi32(); __mmask64 cmp_mask = _mm512_cmpgt_epi16_mask(a,b); cmp = _mm512_mask_set1_epi16(zero, cmp_mask, 0xFFFF); difab = _mm512_sub_epi16(a,b); difba = _mm512_sub_epi16 (b,a); difab = _mm512_and_si512(cmp, difab); difba = _mm512_andnot_si512(cmp, difba); return _mm512_or_si512(difab, difba); } static inline __m512i _mm512_absdiff_epi32(__m512i a, __m512i b) { __m512i cmp, difab, difba; __m512i zero = _mm512_setzero_epi32(); __mmask64 cmp_mask = _mm512_cmpgt_epi32_mask(a,b); cmp = _mm512_mask_set1_epi32(zero, cmp_mask, 0xFFFFFFFF); difab = _mm512_sub_epi32(a,b); difba = _mm512_sub_epi32 (b,a); difab = _mm512_and_si512(cmp, difab); difba = _mm512_andnot_si512(cmp, difba); return _mm512_or_si512(difab, difba); } static inline __m512i _mm512_absdiff_epi8(__m512i a, __m512i b) { __m512i cmp, difab, difba; __m512i zero = _mm512_setzero_epi32(); __mmask64 cmp_mask = _mm512_cmpgt_epi8_mask(a,b); cmp = _mm512_mask_set1_epi8(zero, cmp_mask, 0xFF); difab = _mm512_sub_epi8(a,b); difba = _mm512_sub_epi8 (b,a); difab = _mm512_and_si512(cmp, difab); difba = _mm512_andnot_si512(cmp, difba); return _mm512_or_si512(difab, difba); } static inline void absdiff16s_512s(int16_t *src1, int16_t *src2, int16_t *dst, int len) { int stop_len = len / AVX512_LEN_INT16; stop_len *= AVX512_LEN_INT16; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX512_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX512_LEN_INT16) { __m512i a = _mm512_load_si512((__m512i *) (src1 + i)); __m512i b = _mm512_load_si512((__m512i *) (src2 + i)); _mm512_store_si512((__m512i *)(dst + i), _mm512_absdiff_epi16(a,b)); } } else { for (int i = 0; i < stop_len; i += AVX512_LEN_INT16) { __m512i a = _mm512_loadu_si512((__m512i *) (src1 + i)); __m512i b = _mm512_loadu_si512((__m512i *) (src2 + i)); _mm512_storeu_si512((__m512i *) (dst + i), _mm512_absdiff_epi16(a,b)); } } 
for (int i = stop_len; i < len; i++) { dst[i] = abs(src1[i] - src2[i]); } } static inline void powerspect16s_512s_interleaved(complex16s_t *src, int32_t *dst, int len) { int stop_len = len / AVX512_LEN_INT32; stop_len *= AVX512_LEN_INT32; int j = 0; if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX512_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { __m512i reim = _mm512_load_si512((__m512i *)((const int16_t *)src + j)); // print8i(reim); printf("\n"); _mm512_store_si512((__m512i*)(dst + i), _mm512_madd_epi16 (reim, reim)); j += AVX512_LEN_INT16; } } else { for (int i = 0; i < stop_len; i += AVX512_LEN_INT32) { __m512i reim = _mm512_loadu_si512((__m512i *)((const int16_t *)src + j)); _mm512_storeu_si512((__m512i*)(dst + i), _mm512_madd_epi16 (reim, reim)); j += AVX512_LEN_INT16; } } for (int i = stop_len; i < len; i++) { dst[i] = (int32_t)src[i].re * (int32_t)src[i].re + (int32_t)src[i].im * (int32_t)src[i].im; } }
/* ==================== seidel_2d-a.pluto.c ==================== */
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> /* * N is the number of points * T is the number of timesteps */ #ifdef HAS_DECLS #include "decls.h" #else #define N 1000L #define T 1000L #endif #define NUM_FP_OPS 10 /* Define our arrays */ double A[2][N][N]; double total=0; double sum_err_sqr=0; int chtotal=0; int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; return x->tv_sec < y->tv_sec; } int main(int argc, char * argv[]) { long int t, i, j, k; const int BASE = 1024; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0; //printf("Number of points = %ld\t|Number of timesteps = %ld\t", N*N, T); /* Initialization */ srand(42); // seed with a constant value to verify results for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[0][i][j] = 1.0 * (rand() % BASE); } } #ifdef TIME gettimeofday(&start, 0); #endif // #undef N // #define N 8000L #undef T #define T 500 /* Copyright (C) 1991-2012 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. 
The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* We do support the IEC 559 math functionality, real and complex. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((N >= 1) && (T >= 1)) { for (t1=-1;t1<=floord(T-1,4);t1++) { lbp=ceild(t1,2); ubp=min(floord(2*T+N-2,16),floord(8*t1+N+6,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(0,ceild(t1-63,64));t3<=min(floord(2*T+N-2,512),floord(8*t1+N+14,512));t3++) { if ((t1 <= floord(512*t3-N,8)) && (t2 <= 32*t3-1) && (t3 >= ceild(N,512))) { if (N%2 == 0) { for (t5=max(max(16*t2,512*t3-N+1),-16*t1+16*t2+1024*t3-2*N-13);t5<=min(16*t2+15,-16*t1+16*t2+1024*t3-2*N+2);t5++) { A[0][(-512*t3+t5+N-1)][(N-1)] = 0.111 * ( ( (-512*t3+t5+N-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-512*t3+t5+N-1) - 1][(N-1) - 1]) + A[1][(-512*t3+t5+N-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-512*t3+t5+N-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-512*t3+t5+N-1)][(N-1) - 1]) + A[1][(-512*t3+t5+N-1)][(N-1)] + ((N-1)==N-1 ? 
0 : A[1][(-512*t3+t5+N-1)][(N-1) + 1]) + ( (-512*t3+t5+N-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-512*t3+t5+N-1) + 1][(N-1) - 1]) + A[1][(-512*t3+t5+N-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-512*t3+t5+N-1) + 1][(N-1) + 1]) ) ) );; } } } if ((t1 <= floord(16*t2-N,8)) && (t2 >= ceild(N,16))) { if (N%2 == 0) { for (t6=max(512*t3,16*t2-N+1);t6<=min(16*t2,512*t3+511);t6++) { A[0][(N-1)][(-16*t2+t6+N-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t2+t6+N-1)==0 ? 0 : A[1][(N-1) - 1][(-16*t2+t6+N-1) - 1]) + A[1][(N-1) - 1][(-16*t2+t6+N-1)] + ((-16*t2+t6+N-1)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t2+t6+N-1) + 1]) ) ) + ((-16*t2+t6+N-1)==0 ? 0 : A[1][(N-1)][(-16*t2+t6+N-1) - 1]) + A[1][(N-1)][(-16*t2+t6+N-1)] + ((-16*t2+t6+N-1)==N-1 ? 0 : A[1][(N-1)][(-16*t2+t6+N-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t2+t6+N-1)==0 ? 0: A[1][(N-1) + 1][(-16*t2+t6+N-1) - 1]) + A[1][(N-1) + 1][(-16*t2+t6+N-1)] + ((-16*t2+t6+N-1)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t2+t6+N-1) + 1]) ) ) );; } } } if ((N >= 2) && (t1 == 2*t2) && (t1 <= floord(512*t3-N+511,8)) && (t1 >= ceild(512*t3-N+1,8))) { for (t6=max(8*t1,512*t3);t6<=8*t1+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-8*t1+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-8*t1+t6)==0 ? 0 : A[0][0 - 1][(-8*t1+t6) - 1]) + A[0][0 - 1][(-8*t1+t6)] + ((-8*t1+t6) == N-1 ? 0 : A[0][0 - 1][(-8*t1+t6) + 1]) ) ) + ((-8*t1+t6)==0 ? 0 : A[0][0][(-8*t1+t6) - 1]) + A[0][0][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0][(-8*t1+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-8*t1+t6)==0 ? 0: A[0][0 + 1][(-8*t1+t6) - 1]) + A[0][0 + 1][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0 + 1][(-8*t1+t6) + 1]) ) ) );; } } for (t5=8*t1+1;t5<=8*t1+2;t5++) { for (t6=max(512*t3,8*t1+1);t6<=8*t1+N;t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-1)][(-8*t1+t6-1)] = 0.111 * ( ( (-8*t1+t5-1)==0 ? 0 : ( ((-8*t1+t6-1)==0 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) + 1]) ) ) + ((-8*t1+t6-1)==0 ? 
0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1)][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) + 1]) + ( (-8*t1+t5-1)==N-1 ? 0 : ( ((-8*t1+t6-1)==0 ? 0: A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) + 1]) ) ) );; } } } } if ((t1 == 2*t2) && (t1 >= ceild(512*t3-N+512,8))) { for (t6=max(8*t1,512*t3);t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-8*t1+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-8*t1+t6)==0 ? 0 : A[0][0 - 1][(-8*t1+t6) - 1]) + A[0][0 - 1][(-8*t1+t6)] + ((-8*t1+t6) == N-1 ? 0 : A[0][0 - 1][(-8*t1+t6) + 1]) ) ) + ((-8*t1+t6)==0 ? 0 : A[0][0][(-8*t1+t6) - 1]) + A[0][0][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0][(-8*t1+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-8*t1+t6)==0 ? 0: A[0][0 + 1][(-8*t1+t6) - 1]) + A[0][0 + 1][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0 + 1][(-8*t1+t6) + 1]) ) ) );; } } for (t5=8*t1+1;t5<=8*t1+2;t5++) { for (t6=max(512*t3,8*t1+1);t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-1)][(-8*t1+t6-1)] = 0.111 * ( ( (-8*t1+t5-1)==0 ? 0 : ( ((-8*t1+t6-1)==0 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) + 1]) ) ) + ((-8*t1+t6-1)==0 ? 0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1)][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) + 1]) + ( (-8*t1+t5-1)==N-1 ? 0 : ( ((-8*t1+t6-1)==0 ? 0: A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) + 1]) ) ) );; } } } } if ((N == 1) && (t1 == 2*t2)) { if (t1%2 == 0) { A[1][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[0][0 - 1][0 - 1]) + A[0][0 - 1][0] + (0 == N-1 ? 0 : A[0][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][0][0 - 1]) + A[0][0][0] + (0==N-1 ? 0 : A[0][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 
0: A[0][0 + 1][0 - 1]) + A[0][0 + 1][0] + (0==N-1 ? 0 : A[0][0 + 1][0 + 1]) ) ) );; } if (t1%2 == 0) { A[0][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[1][0 - 1][0 - 1]) + A[1][0 - 1][0] + (0==N-1 ? 0 : A[1][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[1][0][0 - 1]) + A[1][0][0] + (0==N-1 ? 0 : A[1][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 0: A[1][0 + 1][0 - 1]) + A[1][0 + 1][0] + (0==N-1 ? 0 : A[1][0 + 1][0 + 1]) ) ) );; } } for (t4=max(max(max(0,ceild(512*t3-N+1,2)),4*t1),8*t1-8*t2+8);t4<=min(min(min(min(floord(512*t3-N+511,2),floord(16*t1-16*t2+N-1,2)),T-1),4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 
0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=512*t3;t6<=2*t4+N;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(512*t3-N+1,2)),4*t1);t4<=min(min(min(min(floord(8*t1+N-9,2),floord(512*t3-N+511,2)),T-1),4*t1+3),256*t3-1);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+9;t5<=min(2*t4+N,-8*t1+4*t4+10);t5++) { for (t6=512*t3;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(0,ceild(512*t3-N+512,2)),4*t1),8*t1-8*t2+8);t4<=min(min(min(floord(16*t1-16*t2+N-1,2),T-1),4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(512*t3-N+512,2)),4*t1);t4<=min(min(T-1,4*t1+3),256*t3-1);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+9;t5<=-8*t1+4*t4+10;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+1,2),4*t1+1);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+3),256*t3-1);t4++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+1;t5<=-8*t1+4*t4+2;t5++) { for (t6=512*t3;t6<=2*t4+N;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+1,2),4*t1+4);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+7),256*t3-1);t4++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 
0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 
0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+512,2),4*t1+1);t4<=min(min(T-1,4*t1+3),256*t3-1);t4++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+1;t5<=-8*t1+4*t4+2;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+512,2),4*t1+4);t4<=min(min(T-1,4*t1+7),256*t3-1);t4++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if ((N == 1) && (t1 == 2*t2)) { for (t4=4*t1+1;t4<=min(T-1,4*t1+7);t4++) { if (t1%2 == 0) { A[1][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[0][0 - 1][0 - 1]) + A[0][0 - 1][0] + (0 == N-1 ? 0 : A[0][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][0][0 - 1]) + A[0][0][0] + (0==N-1 ? 0 : A[0][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 0: A[0][0 + 1][0 - 1]) + A[0][0 + 1][0] + (0==N-1 ? 0 : A[0][0 + 1][0 + 1]) ) ) );; } if (t1%2 == 0) { A[0][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[1][0 - 1][0 - 1]) + A[1][0 - 1][0] + (0==N-1 ? 0 : A[1][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[1][0][0 - 1]) + A[1][0][0] + (0==N-1 ? 0 : A[1][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 0: A[1][0 + 1][0 - 1]) + A[1][0 + 1][0] + (0==N-1 ? 0 : A[1][0 + 1][0 + 1]) ) ) );; } } } for (t4=max(max(4*t1,256*t3),8*t1-8*t2+8);t4<=min(min(min(floord(512*t3-N+511,2),floord(16*t1-16*t2+N-1,2)),T-1),4*t1+3);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 
0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(4*t1,256*t3);t4<=min(min(min(floord(8*t1+N-9,2),floord(512*t3-N+511,2)),T-1),4*t1+3);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+9;t5<=min(2*t4+N,-8*t1+4*t4+10);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(ceild(512*t3-N+512,2),4*t1),256*t3),8*t1-8*t2+8);t4<=min(min(floord(16*t1-16*t2+N-1,2),T-1),4*t1+3);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(max(ceild(512*t3-N+512,2),4*t1),256*t3);t4<=min(min(floord(8*t1+N-9,2),T-1),4*t1+3);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+9;t5<=min(2*t4+N,-8*t1+4*t4+10);t5++) { for (t6=2*t4+1;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(max(0,ceild(16*t1-16*t2+N,2)),ceild(16*t2-N+1,2)),ceild(512*t3-N+1,2)),8*t1-8*t2+8);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=512*t3;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1 == 2*t2-1) && (t1 == 64*t3-1)) { for (t4=max(max(0,ceild(8*t1-N+9,2)),ceild(8*t1+N-8,2));t4<=min(T-1,4*t1+3);t4++) { for (t5=8*t1+8;t5<=2*t4+N-1;t5++) { for (t6=8*t1+8;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } for (t6=8*t1+8;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(0,ceild(16*t1-16*t2+N,2)),ceild(16*t2-N+1,2)),ceild(512*t3-N+512,2));t4<=min(min(T-1,4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(ceild(512*t3-N+1,2),4*t1+4);t4<=min(min(min(min(floord(16*t2-N+15,2),floord(512*t3-N+511,2)),floord(16*t1-16*t2+N+12,2)),T-1),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=512*t3;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(ceild(16*t2-N+16,2),ceild(512*t3-N+1,2)),4*t1+4),8*t1-8*t2+8);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+7),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } if (t1 <= min(min(min(floord(16*t2-N+1,8),floord(8*t2+256*t3-N+249,8)),floord(16*t2+2*T-N-15,16)),floord(16*t2+512*t3-N-15,16))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=512*t3;t6<=16*t1-16*t2+2*N+12;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=512*t3;t6<=16*t1-16*t2+2*N+13;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 
0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } for (t4=max(ceild(512*t3-N+512,2),4*t1+4);t4<=min(min(min(floord(16*t2-N+15,2),floord(16*t1-16*t2+N+12,2)),T-1),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(ceild(16*t2-N+16,2),ceild(512*t3-N+512,2)),4*t1+4),8*t1-8*t2+8);t4<=min(min(T-1,4*t1+7),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if ((t1 <= min(min(floord(16*t2-N+1,8),floord(16*t2+2*T-N-15,16)),floord(16*t2+512*t3-N-15,16))) && (t1 >= ceild(8*t2+256*t3-N+251,8))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 
0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } if (t1 == 2*t2) { for (t4=max(256*t3,4*t1+1);t4<=min(min(min(floord(8*t1+N-1,2),floord(512*t3-N+511,2)),T-1),4*t1+3);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+1;t5<=min(2*t4+N,-8*t1+4*t4+2);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(256*t3,4*t1+4);t4<=min(min(min(floord(8*t1+N-1,2),floord(512*t3-N+511,2)),T-1),4*t1+7);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 
0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(max(ceild(512*t3-N+512,2),256*t3),4*t1+1);t4<=min(T-1,4*t1+3);t4++) { for (t6=2*t4;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+1;t5<=-8*t1+4*t4+2;t5++) { for (t6=2*t4+1;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(ceild(16*t1-16*t2+N,2),ceild(16*t2-N+1,2)),256*t3),8*t1-8*t2+8);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+3);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 
0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1 == 2*t2-1) { for (t4=max(max(ceild(8*t1-N+9,2),ceild(8*t1+N-8,2)),256*t3);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+3);t4++) { for (t5=8*t1+8;t5<=2*t4+N-1;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } for (t4=max(max(max(max(ceild(16*t1-16*t2+N,2),ceild(16*t2-N+1,2)),ceild(512*t3-N+512,2)),256*t3),8*t1-8*t2+8);t4<=min(T-1,4*t1+3);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 
0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1 == 2*t2-1) && (t1 == 64*t3+63)) { for (t4=max(ceild(8*t1-N+9,2),ceild(8*t1+N-8,2));t4<=min(T-1,4*t1+3);t4++) { for (t5=8*t1+8;t5<=2*t4+N-1;t5++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 
0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } } for (t6=2*t4+1;t6<=8*t1+7;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } for (t6=2*t4+1;t6<=8*t1+7;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 
0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(256*t3,4*t1+4),8*t1-8*t2+8);t4<=min(min(min(floord(16*t2-N+15,2),floord(512*t3-N+511,2)),floord(16*t1-16*t2+N+12,2)),T-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(ceild(16*t2-N+16,2),256*t3),4*t1+4),8*t1-8*t2+8);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+7);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 
0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } if ((N >= 3) && (t1 <= min(min(floord(16*t2-N+1,8),floord(8*t2+256*t3-N+249,8)),floord(16*t2+2*T-N-15,16))) && (t1 >= ceild(16*t2+512*t3-N-13,16))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=16*t1-16*t2+N+13;t6<=16*t1-16*t2+2*N+12;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 
0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=16*t1-16*t2+N+14;t6<=16*t1-16*t2+2*N+13;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } for (t4=max(max(ceild(512*t3-N+512,2),256*t3),4*t1+4);t4<=min(min(min(floord(16*t2-N+15,2),floord(16*t1-16*t2+N+12,2)),T-1),256*t3+255);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 
0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 
0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(max(ceild(16*t2-N+16,2),ceild(512*t3-N+512,2)),256*t3),4*t1+4),8*t1-8*t2+8);t4<=min(min(T-1,4*t1+7),256*t3+255);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if ((t1 <= min(min(floord(16*t2-N+1,8),floord(16*t2+2*T-N-15,16)),floord(16*t2+512*t3-N+497,16))) && (t1 >= max(ceild(8*t2+256*t3-N+251,8),ceild(16*t2+512*t3-N-13,16)))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=16*t1-16*t2+N+13;t6<=512*t3+511;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 
0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=16*t1-16*t2+N+14;t6<=512*t3+511;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } if ((N >= 2) && (t1 == 2*t2)) { for (t4=max(ceild(8*t1+N,2),256*t3);t4<=min(floord(8*t1-N+15,2),T-1);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=2*t4+N-1;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 
0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=2*t4+N;t6++) { if (t1%2 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(max(ceild(8*t1+N,2),ceild(8*t1-N+16,2)),256*t3);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+7);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(max(ceild(512*t3-N+512,2),256*t3),4*t1+4);t4<=min(T-1,4*t1+7);t4++) { for (t6=2*t4;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 <= min(min(floord(16*t2-N,8),floord(16*t2+2*T-N-16,16)),floord(16*t2+512*t3-N+496,16))) { if (N%2 == 0) { for (t6=max(512*t3,16*t1-16*t2+N+14);t6<=min(512*t3+511,16*t1-16*t2+2*N+13);t6++) { A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[0][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[0][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14) == N-1 ? 0 : A[0][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[0][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[0][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[0][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[0][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[0][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } } } } } /* End of CLooG code */ #undef T #define T 1000 // #undef N // #define N 16000L #ifdef TIME gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); printf("%7.5lf", tdiff); //printf("|Time taken = %7.5lfs\n", tdiff ); //printf("|MFLOPS = %f\n", ((((double)NUM_FP_OPS * N *N * T) / tdiff) / 1000000L)); #endif #ifdef VERIFY for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { total+= A[T%2][i][j] ; } } printf("|sum: %e\t", total); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { sum_err_sqr += (A[T%2][i][j] - (total/N))*(A[T%2][i][j] - (total/N)); } } printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr)); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { chtotal += ((char *)A[T%2][i])[j]; } } printf("|sum(rep(A)) = %d\n", chtotal); #endif return 0; } // icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm // /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/ // /* @ begin 
PrimeRegTile (scalar_replacement=0; T1t3=8; T1t4=8; ) @*/ // /* @ end @*/
vednnActivationForward.c
#include <stdio.h> #include <stdint.h> #include "vednnActivationForward.h" #ifdef VEDNN_USE_OPENMP #include <stdint.h> #include <omp.h> extern int __vednn_omp_num_threads ; #endif static inline vednnError_t vednnActivationForward_wrapper( vednnActivationForward_t pFunc, const void *pDataIn, void *pDataOut, const uint64_t nElements ) { #ifdef VEDNN_USE_OPENMP if ( __vednn_omp_num_threads == 1 ) { return pFunc(pDataIn, pDataOut, nElements) ; } else { vednnError_t rc = VEDNN_SUCCESS ; #pragma omp parallel reduction(|:rc) { int64_t nthreads = omp_get_num_threads() ; int64_t threadid = omp_get_thread_num() ; int64_t eachNElement = nElements / nthreads ; int64_t remain = nElements % nthreads ; int64_t elementBegin = eachNElement * threadid + ( threadid < remain ? threadid : remain ) ; int64_t myElement = eachNElement + ( threadid < remain ? 1 : 0 ) ; if( myElement == 0 ) { rc |= VEDNN_SUCCESS ; } else { float* _pDataIn = ((float *)pDataIn) + elementBegin ; float* _pDataOut = ((float *)pDataOut) + elementBegin ; rc |= pFunc((void*)_pDataIn, (void*) _pDataOut, myElement) ; } } return rc ; } #else return pFunc(pDataIn, pDataOut, nElements) ; #endif } /* ----------------------------------------------------------------------- */ vednnError_t vednnActivationForward( const vednnActivationMode_t mode, const void *pDataIn, void *pDataOut, const uint64_t nElements ) { switch(mode) { case VEDNN_ACTIVATION_RELU : return vednnActivationForward_wrapper( vednnActivationForward_Relu, pDataIn, pDataOut, nElements ) ; default : fprintf(stderr, "VEDNN Error : vednnActivationForward : Invalid Parameter !!\n") ; return VEDNN_ERROR_INVALID_PARAM ; } }
omp_task.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int test_omp_task() { int tids[NUM_TASKS]; int i; #pragma omp parallel { #pragma omp single { for (i = 0; i < NUM_TASKS; i++) { /* First we have to store the value of the loop index in a new variable * which will be private for each task because otherwise it will be overwritten * if the execution of the task takes longer than the time which is needed to * enter the next step of the loop! */ int myi; myi = i; #pragma omp task { my_sleep (SLEEPTIME); tids[myi] = omp_get_thread_num(); } /* end of omp task */ } /* end of for */ } /* end of single */ } /*end of parallel */ /* Now we ckeck if more than one thread executed the tasks. */ for (i = 1; i < NUM_TASKS; i++) { if (tids[0] != tids[i]) return 1; } return 0; } /* end of check_parallel_for_private */ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_task()) { num_failed++; } } return num_failed; }
generator_gemm_common.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke, Evangelos Georganas (Intel Corp.) ******************************************************************************/ #include "generator_gemm_common.h" #include "generator_common.h" #include "generator_x86_instructions.h" #include "libxsmm_main.h" #include "generator_common_x86.h" LIBXSMM_API_INTERN void libxsmm_generator_gemm_apply_relu_to_vreg( libxsmm_generated_code* io_generated_code, const libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int zero_vreg, const unsigned int inout_vreg, const unsigned int store_bitmask, const unsigned int gpr_bitmask, const unsigned int store_bitmask_offset, const unsigned int is_32_bit_relu, const unsigned int aux_gpr, const unsigned int aux_vreg) { if (io_generated_code->arch < LIBXSMM_X86_AVX512) { if (is_32_bit_relu == 1) { if (store_bitmask == 1) { libxsmm_x86_instruction_vec_compute_3reg_imm8( io_generated_code, LIBXSMM_X86_INSTR_VCMPPS, i_micro_kernel_config->vector_name, zero_vreg, inout_vreg, aux_vreg, 6 ); libxsmm_x86_instruction_vec_compute_3reg_imm8( io_generated_code, LIBXSMM_X86_INSTR_VMOVMSKPS, i_micro_kernel_config->vector_name, aux_vreg, LIBXSMM_X86_VEC_REG_UNDEF, aux_gpr, 0 ); libxsmm_x86_instruction_alu_mem( io_generated_code, LIBXSMM_X86_INSTR_MOVB, gpr_bitmask, LIBXSMM_X86_GP_REG_UNDEF, 0, store_bitmask_offset, aux_gpr, 1); } libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, LIBXSMM_X86_INSTR_VMAXPS, i_micro_kernel_config->vector_name, inout_vreg, zero_vreg, inout_vreg ); } else { /* shouldn't happen */ LIBXSMM_HANDLE_ERROR( 
io_generated_code, LIBXSMM_ERR_UNSUP_DATATYPE ); return; } } else { if (store_bitmask == 0) { libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_VMAXPS : LIBXSMM_X86_INSTR_VPMAXSW, i_micro_kernel_config->vector_name, inout_vreg, zero_vreg, inout_vreg); } else { unsigned int current_mask_reg = 7; libxsmm_x86_instruction_vec_compute_3reg_imm8( io_generated_code, (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_VCMPPS : LIBXSMM_X86_INSTR_VPCMPW, i_micro_kernel_config->vector_name, zero_vreg, inout_vreg, current_mask_reg, 6 ); /* Blend output result with zero reg based on relu mask */ libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code, (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_VPBLENDMD : LIBXSMM_X86_INSTR_VPBLENDMW, i_micro_kernel_config->vector_name, inout_vreg, zero_vreg, inout_vreg, current_mask_reg, 0 ); /* Store bitmask */ libxsmm_x86_instruction_mask_move_mem( io_generated_code, (is_32_bit_relu == 1) ? LIBXSMM_X86_INSTR_KMOVW_ST : LIBXSMM_X86_INSTR_KMOVD_ST, gpr_bitmask, LIBXSMM_X86_GP_REG_UNDEF, 0, store_bitmask_offset, current_mask_reg ); } } } LIBXSMM_API_INTERN void libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( libxsmm_generated_code* io_generated_code, libxsmm_micro_kernel_config* i_micro_kernel_config_mod, const unsigned int scratch_gpr, const unsigned int in_vreg, const unsigned int out_vreg ) { /* Load accumulator from scratch */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config_mod->instruction_set, LIBXSMM_X86_INSTR_VMOVUPS, scratch_gpr, LIBXSMM_X86_GP_REG_UNDEF, 0, in_vreg * 64, i_micro_kernel_config_mod->vector_name, out_vreg, 0, 1, 0 ); /* Apply sigmoid */ if (io_generated_code->arch >= LIBXSMM_X86_AVX512) { libxsmm_generator_sigmoid_ps_rational_78_avx512( io_generated_code, out_vreg, i_micro_kernel_config_mod->vec_x2, i_micro_kernel_config_mod->vec_nom, i_micro_kernel_config_mod->vec_denom, i_micro_kernel_config_mod->mask_hi, 
i_micro_kernel_config_mod->mask_lo, i_micro_kernel_config_mod->vec_c0, i_micro_kernel_config_mod->vec_c1, i_micro_kernel_config_mod->vec_c2, i_micro_kernel_config_mod->vec_c3, i_micro_kernel_config_mod->vec_c1_d, i_micro_kernel_config_mod->vec_c2_d, i_micro_kernel_config_mod->vec_c3_d, i_micro_kernel_config_mod->vec_hi_bound, i_micro_kernel_config_mod->vec_lo_bound, i_micro_kernel_config_mod->vec_ones, i_micro_kernel_config_mod->vec_neg_ones, i_micro_kernel_config_mod->vec_halves ); } else { libxsmm_generator_sigmoid_ps_rational_78_avx( io_generated_code, out_vreg, i_micro_kernel_config_mod->vec_x2, i_micro_kernel_config_mod->vec_nom, i_micro_kernel_config_mod->vec_denom, i_micro_kernel_config_mod->vec_c0, i_micro_kernel_config_mod->vec_c1, i_micro_kernel_config_mod->vec_c2, i_micro_kernel_config_mod->vec_c3, i_micro_kernel_config_mod->vec_c1_d, i_micro_kernel_config_mod->vec_c2_d, i_micro_kernel_config_mod->vec_c3_d, i_micro_kernel_config_mod->vec_hi_bound, i_micro_kernel_config_mod->vec_lo_bound, i_micro_kernel_config_mod->vec_ones, i_micro_kernel_config_mod->vec_neg_ones); } } LIBXSMM_API_INTERN void libxsmm_generator_gemm_restore_2D_regblock_from_scratch( libxsmm_generated_code* io_generated_code, const libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int scratch_gpr, const unsigned int l_vec_reg_acc_start, const unsigned int l_m_blocking, const unsigned int i_n_blocking) { unsigned int l_n, l_m; for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVUPS, scratch_gpr, LIBXSMM_X86_GP_REG_UNDEF, 0, (l_vec_reg_acc_start + l_m + (l_m_blocking * l_n)) * 64, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), 0, 0, 0 ); } } } LIBXSMM_API_INTERN void libxsmm_generator_gemm_store_2D_regblock_to_scratch( libxsmm_generated_code* io_generated_code, const 
libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int scratch_gpr, const unsigned int l_vec_reg_acc_start, const unsigned int l_m_blocking, const unsigned int i_n_blocking) { unsigned int l_n, l_m; for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVUPS, scratch_gpr, LIBXSMM_X86_GP_REG_UNDEF, 0, (l_vec_reg_acc_start + l_m + (l_m_blocking * l_n)) * 64, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), 0, 0, 1 ); } } } LIBXSMM_API_INTERN void libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( libxsmm_generated_code* io_generated_code, libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int l_vec_reg_acc_start, const unsigned int l_m_blocking, const unsigned int i_n_blocking, const unsigned int scratch_gpr, const unsigned int aux_gpr) { unsigned int n_avail_vregs = (io_generated_code->arch >= LIBXSMM_X86_AVX512) ? 32 : 16; unsigned int n_avail_masks = (io_generated_code->arch >= LIBXSMM_X86_AVX512) ? 
8 : 16; /* First dump the accumulators to scratch and then setup sigmoid coeffcients to be reused */ libxsmm_x86_instruction_push_reg( io_generated_code, scratch_gpr); libxsmm_x86_instruction_push_reg( io_generated_code, aux_gpr ); libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR, scratch_gpr); libxsmm_generator_gemm_store_2D_regblock_to_scratch( io_generated_code, i_micro_kernel_config, scratch_gpr, l_vec_reg_acc_start, l_m_blocking, i_n_blocking); libxsmm_generator_gemm_prepare_coeffs_sigmoid_ps_rational_78_avx_avx512( io_generated_code, i_micro_kernel_config, n_avail_vregs, n_avail_masks, aux_gpr ); } LIBXSMM_API_INTERN void libxsmm_generator_gemm_prepare_relu_fusion( libxsmm_generated_code* io_generated_code, const libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int zero_vreg, const unsigned int store_bitmask, const unsigned int bitmask_gpr, const unsigned int aux_gpr) { /* Zero out register 0 to perform relu */ libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, i_micro_kernel_config->vxor_instruction, i_micro_kernel_config->vector_name, zero_vreg, zero_vreg, zero_vreg); if (store_bitmask == 1) { libxsmm_x86_instruction_push_reg( io_generated_code, bitmask_gpr ); if (io_generated_code->arch < LIBXSMM_X86_AVX512) { libxsmm_x86_instruction_push_reg( io_generated_code, aux_gpr ); } libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, bitmask_gpr ); } } LIBXSMM_API_INTERN void libxsmm_generator_gemm_cleanup_relu_fusion( libxsmm_generated_code* io_generated_code, const unsigned int store_bitmask, const unsigned int bitmask_gpr, const unsigned int aux_gpr) { if (store_bitmask == 1) { if (io_generated_code->arch < LIBXSMM_X86_AVX512) { libxsmm_x86_instruction_pop_reg( io_generated_code, aux_gpr ); } libxsmm_x86_instruction_pop_reg( io_generated_code, bitmask_gpr); } } LIBXSMM_API_INTERN void 
libxsmm_generator_gemm_cleanup_sigmoid_fusion( libxsmm_generated_code* io_generated_code, const unsigned int scratch_gpr, const unsigned int aux_gpr ) { libxsmm_x86_instruction_pop_reg( io_generated_code, aux_gpr ); libxsmm_x86_instruction_pop_reg( io_generated_code, scratch_gpr ); } LIBXSMM_API_INTERN void libxsmm_generator_gemm_load_colbias_to_2D_block( libxsmm_generated_code* io_generated_code, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, const libxsmm_micro_kernel_config* i_micro_kernel_config, libxsmm_datatype colbias_precision, const unsigned int l_vec_reg_acc_start, const unsigned int l_m_blocking, const unsigned int i_n_blocking ) { unsigned int l_n = 0, l_m = 0; libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_2 ); for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { if (colbias_precision == LIBXSMM_DATATYPE_BF16) { if (l_n == 0) { /* Load bias vector */ /* load 16 bit values into xmm portion of the register */ if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, i_gp_reg_mapping->gp_reg_help_2, LIBXSMM_X86_GP_REG_UNDEF, 0, (l_m * (i_micro_kernel_config->vector_length)) * 2, ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 
'y' : 'z', l_vec_reg_acc_start + l_m, 2, 1, 0 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction, i_gp_reg_mapping->gp_reg_help_2, LIBXSMM_X86_GP_REG_UNDEF, 0, (l_m * (i_micro_kernel_config->vector_length)) * 2, ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'x' : 'y', l_vec_reg_acc_start + l_m, 0, 1, 0 ); } /* convert 16 bit values into 32 bit (integer convert) */ libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPMOVSXWD, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m, l_vec_reg_acc_start + l_m ); /* shift 16 bits to the left to generate valid FP32 numbers */ libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code, LIBXSMM_X86_INSTR_VPSLLD_I, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m, l_vec_reg_acc_start + l_m, 16); } else { libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VMOVUPS, ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z', l_vec_reg_acc_start + l_m, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) ); } } else if (colbias_precision == LIBXSMM_DATATYPE_F32) { if (l_n == 0) { /* Load bias vector */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction, i_gp_reg_mapping->gp_reg_help_2, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_m * (i_micro_kernel_config->vector_length))) * 4, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m, ( l_m == (l_m_blocking - 1) ) ? 
i_micro_kernel_config->use_masking_a_c : 0, 1, 0 ); } else { libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VMOVUPS, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) ); } } else { /* shouldn't happen */ LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_UNSUP_DATATYPE ); return; } } } libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); } LIBXSMM_API_INTERN void libxsmm_generator_gemm_add_colbias_to_2D_block( libxsmm_generated_code* io_generated_code, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, const libxsmm_micro_kernel_config* i_micro_kernel_config, libxsmm_datatype colbias_precision, const unsigned int l_vec_reg_acc_start, const unsigned int l_m_blocking, const unsigned int i_n_blocking ) { unsigned int l_n = 0, l_m = 0; libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_2 ); for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { /* Load bias vector */ if (colbias_precision == LIBXSMM_DATATYPE_BF16) { /* load 16 bit values into xmm portion of the register */ if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, i_gp_reg_mapping->gp_reg_help_2, LIBXSMM_X86_GP_REG_UNDEF, 0, (l_m * (i_micro_kernel_config->vector_length)) * 2, ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 
'y' : 'z', 0, 2, 1, 0 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction, i_gp_reg_mapping->gp_reg_help_2, LIBXSMM_X86_GP_REG_UNDEF, 0, (l_m * (i_micro_kernel_config->vector_length)) * 2, ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'x' : 'y', 0, 0, 1, 0 ); } /* convert 16 bit values into 32 bit (integer convert) */ libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPMOVSXWD, i_micro_kernel_config->vector_name, 0, 0 ); /* shift 16 bits to the left to generate valid FP32 numbers */ libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code, LIBXSMM_X86_INSTR_VPSLLD_I, i_micro_kernel_config->vector_name, 0, 0, 16); } else if (colbias_precision == LIBXSMM_DATATYPE_F32) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction, i_gp_reg_mapping->gp_reg_help_2, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_m * (i_micro_kernel_config->vector_length))) * 4, i_micro_kernel_config->vector_name, 0, ( l_m == (l_m_blocking - 1) ) ? 
i_micro_kernel_config->use_masking_a_c : 0, 1, 0 ); } else { /* shouldn't happen */ LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_UNSUP_DATATYPE ); return; } /* Add colbias */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, LIBXSMM_X86_INSTR_VADDPS, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), 0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) ); } } libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); }
/* Prepares vector-register constants for a rational 7/8 Pade approximation of
 * sigmoid (AVX/AVX512 path).  Registers are assigned top-down from the pool of
 * reserved zmms (vec_x2 = reserved_zmms-1 down to vec_halves = reserved_zmms-15).
 * The 16-float coefficient table is first written to the GEMM stack scratch, then
 * individual scalars are broadcast from fixed scratch offsets into the reserved
 * registers: c0..c3 @ 0/4/8/12, c1_d..c3_d @ 16/20/24, hi/lo clamp bounds
 * @ 28/32 (+-4.97f), 1.0f @ 36, -1.0f @ 40, 0.5f @ 44.  Two mask registers are
 * reserved at the end for the hi/lo clamping. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_prepare_coeffs_sigmoid_ps_rational_78_avx_avx512( libxsmm_generated_code* io_generated_code, libxsmm_micro_kernel_config* i_micro_kernel_config, unsigned int reserved_zmms, unsigned int reserved_mask_regs, unsigned int temp_reg ) { float pade78_sigm_array[16] = { 2027025.0f, 270270.0f, 6930.0f, 36.0f, 945945.0f, 51975.0f, 630.0f, 4.97f, -4.97f, 1.0f, -1.0f, 0.5f, 0.0f, 0.0f, 0.0f, 0.0f }; i_micro_kernel_config->vec_x2 = reserved_zmms - 1; i_micro_kernel_config->vec_nom = reserved_zmms - 2; i_micro_kernel_config->vec_denom = reserved_zmms - 3; i_micro_kernel_config->vec_c0 = reserved_zmms - 4; i_micro_kernel_config->vec_c1 = reserved_zmms - 5; i_micro_kernel_config->vec_c2 = reserved_zmms - 6; i_micro_kernel_config->vec_c3 = reserved_zmms - 7; i_micro_kernel_config->vec_c1_d = reserved_zmms - 8; i_micro_kernel_config->vec_c2_d = reserved_zmms - 9; i_micro_kernel_config->vec_c3_d = reserved_zmms - 10; i_micro_kernel_config->vec_hi_bound = reserved_zmms - 11; i_micro_kernel_config->vec_lo_bound = reserved_zmms - 12; i_micro_kernel_config->vec_ones = reserved_zmms - 13; i_micro_kernel_config->vec_neg_ones = reserved_zmms - 14; i_micro_kernel_config->vec_halves = reserved_zmms - 15; libxsmm_x86_instruction_full_vec_load_of_constants ( io_generated_code, (const unsigned char *) pade78_sigm_array, "pade78_sigm_array_", i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0);
/* Spill the coefficient table to the GEMM stack scratch (using vec_c0 as the
 * staging register).  Below AVX512 the vector is only 32 bytes wide, so the
 * second half of the 64-byte table needs its own load + store at offset 32. */
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR, temp_reg ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVUPS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0, 0, 1, 1 ); if (io_generated_code->arch < LIBXSMM_X86_AVX512) { libxsmm_x86_instruction_full_vec_load_of_constants ( io_generated_code, (const unsigned char *) &pade78_sigm_array[8], "pade78_sigm_array2_", i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVUPS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 32, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0, 0, 1, 1 ); }
/* Broadcast each scalar coefficient from the scratch table into its reserved
 * vector register. */
libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c0, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 4, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c1, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 8, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c2, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 12, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c3, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg,
LIBXSMM_X86_GP_REG_UNDEF, 0, 16, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c1_d, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 20, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c2_d, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 24, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_c3_d, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 28, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_hi_bound, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 32, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_lo_bound, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 36, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_ones, 0, 1, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 40, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_neg_ones, 0, 1, 0 );
/* The 0.5f constant is only broadcast on AVX512-and-newer targets. */
if (io_generated_code->arch >= LIBXSMM_X86_AVX512) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, temp_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 44, i_micro_kernel_config->vector_name, i_micro_kernel_config->vec_halves, 0, 1, 0 ); }
/* Reserve two mask registers for the hi/lo input clamping of the approximation. */
i_micro_kernel_config->mask_hi = reserved_mask_regs - 1;
i_micro_kernel_config->mask_lo = reserved_mask_regs - 2; }
/* Fills the kernel's stack-frame variables for the "v2" (XGEMM/XGEMM-EXT ABI)
 * entry convention: all kernel arguments live in one parameter structure
 * addressed by gp_reg_help_1, and the fields that are actually needed (int8
 * scaling factor, A/B prefetch pointers, BRGEMM offset arrays, fused-eltwise
 * pointers) are copied into dedicated stack slots via temp_reg.
 * NOTE(review): the byte offsets used below (40, 48, 56, 72, 88, 104, 112,
 * 128, 160, 192) are assumed to match the parameter-struct layout -- confirm
 * against the struct definition when changing either side. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars_v2( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, libxsmm_micro_kernel_config* i_micro_kernel_config, const libxsmm_gp_reg_mapping* i_gp_reg_mapping ) { int is_stride_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE) > 0) ? 1 : 0; int is_offset_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) > 0) ? 1 : 0; int is_address_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) > 0) ? 1 : 0; int is_brgemm = ((is_stride_brgemm == 1) || (is_offset_brgemm == 1) || (is_address_brgemm == 1)) ? 1 : 0; int has_scf = ((LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ))) ? 1 : 0; int has_A_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2_AHEAD || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD) ? 1 : 0; int has_B_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_PREFETCH_AL2CL2BL2_VIA_C ) ?
1 : 0; unsigned int temp_reg = LIBXSMM_X86_GP_REG_R10; if (has_scf == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 112, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, temp_reg ); } if (has_A_pf_ptr == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 56, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg ); } if (has_B_pf_ptr == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 88, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } if ((is_brgemm == 1) && ( i_micro_kernel_config->decompress_A == 1)) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_BRCOUNT, i_gp_reg_mapping->gp_reg_reduce_count ); } if (is_offset_brgemm == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 40, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR, temp_reg ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 72, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR, temp_reg ); } if (i_micro_kernel_config->fused_eltwise == 1) { if
(i_micro_kernel_config->has_colbias_act_fused == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 128, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, temp_reg ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 104, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg ); } if (i_micro_kernel_config->decompress_A == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 48, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR, temp_reg ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 160, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF, temp_reg ); } if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 104, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg ); } if (i_micro_kernel_config->fused_relu_bwd == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 104, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var(
io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, temp_reg ); } if (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_help_1, LIBXSMM_X86_GP_REG_UNDEF, 0, 192, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B, temp_reg ); } } }
/* Fills the stack-frame variables for the legacy calling convention, where the
 * optional trailing arguments (int8 scaling factor, eltwise struct pointer,
 * A/B prefetch pointers, BRGEMM offset arrays) arrive in integer argument
 * registers (RCX/R8/R9) or, once those are exhausted, in caller stack slots
 * exposed as LIBXSMM_GEMM_STACK_VAR_ARG_7..ARG_10.  Which register holds which
 * argument depends on the GEMM flavor (plain vs. BRGEMM stride/address/offset)
 * and on which optional arguments are present, hence the exhaustive nesting
 * below.  At the end, fused-eltwise pointers are copied out of the eltwise
 * struct into their own stack slots. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars(libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, libxsmm_micro_kernel_config* i_micro_kernel_config) { int is_stride_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE) > 0) ? 1 : 0; int is_offset_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) > 0) ? 1 : 0; int is_address_brgemm = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) > 0) ? 1 : 0; int is_brgemm = ((is_stride_brgemm == 1) || (is_offset_brgemm == 1) || (is_address_brgemm == 1)) ? 1 : 0; int has_scf = ((LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ))) ? 1 : 0; int has_A_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2_AHEAD || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD) ? 1 : 0; int has_B_pf_ptr = (i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_PREFETCH_AL2CL2BL2_VIA_C ) ?
1 : 0; unsigned int eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R11; unsigned int temp_reg = LIBXSMM_X86_GP_REG_R10;
/* Plain GEMM: optional args start at the 4th integer argument register. */
if (is_brgemm == 0) { /* GEMM (A, B, C, [scf, eltwise_struct, Apf, Bpf] */ if (has_scf == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, LIBXSMM_X86_GP_REG_RCX ); if (i_micro_kernel_config->fused_eltwise == 1) { eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R8; if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R9 ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 ); } } } else { if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R8 ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 ); } } } } else { if (i_micro_kernel_config->fused_eltwise == 1) { eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_RCX; if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R8 ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config,
LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 ); } } } else { if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_RCX ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_RCX ); } } } } } else { if (i_micro_kernel_config->decompress_A == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RCX, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_BRCOUNT, temp_reg ); } if ((is_stride_brgemm == 1) || (is_address_brgemm == 1)) { /* BRGEMM_ADDR/STRIDE (A, B, C, cnt, [scf, eltwise_struct, Apf, Bpf] */ if (has_scf == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, LIBXSMM_X86_GP_REG_R8 ); if (i_micro_kernel_config->fused_eltwise == 1) { eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R9; if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code,
i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } } else { if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R9 ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 ); } } } } else { if (i_micro_kernel_config->fused_eltwise == 1) { eltwise_struct_ptr_reg = LIBXSMM_X86_GP_REG_R8; if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R9 ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 ); } } } else { if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, LIBXSMM_X86_GP_REG_R8 ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code,
i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R9 ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, LIBXSMM_X86_GP_REG_R8 ); } } } } } else { /* BRGEMM_OFFS (A, B, C, cnt, A_off, B_off, [scf, eltwise struct, Apf, Bpf] */ libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR, LIBXSMM_X86_GP_REG_R8 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR, LIBXSMM_X86_GP_REG_R9 ); if (has_scf == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_INT8_SCF, temp_reg ); if (i_micro_kernel_config->fused_eltwise == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, eltwise_struct_ptr_reg ); if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_10, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } } else { if (has_A_pf_ptr == 1) {
libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } } } else { if (i_micro_kernel_config->fused_eltwise == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, eltwise_struct_ptr_reg ); if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_9, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } } else { if (has_A_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config,
LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFA_PTR, temp_reg ); if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_8, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } else { if (has_B_pf_ptr == 1) { libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ARG_7, temp_reg ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_PFB_PTR, temp_reg ); } } } } } }
/* Copy the fused-eltwise pointers out of the eltwise struct into their own
 * stack slots. */
if (i_micro_kernel_config->fused_eltwise == 1) { if (i_micro_kernel_config->has_colbias_act_fused == 1) { /* TODO: Optimize this copy to operate only on used fields from the struct... */ libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, temp_reg ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 8, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg ); } if (i_micro_kernel_config->decompress_A == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 16, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR, temp_reg ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 24, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF, temp_reg ); } if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 8, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, temp_reg ); } if (i_micro_kernel_config->fused_relu_bwd == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 32, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, temp_reg ); } if (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, eltwise_struct_ptr_reg, LIBXSMM_X86_GP_REG_UNDEF, 0, 16, temp_reg, 0 ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B, temp_reg ); } } }
/* Computes the GEMM scratch size needed on the stack and carves it off RSP:
 * - EMULATE_AMX env var set: C-sized buffer for emulated tiles plus 8*32*32
 *   tile backing and a 32*64 spill area (F32 output needs only the latter);
 * - AVX512/SPR and newer: max(32*64, n*ldc*4 [*2 when a single n-tile]);
 * - otherwise: a 32*64-byte zmm spill area, only for the extended ABI.
 * The size is padded up to a 64-byte multiple before adjusting RSP, and the
 * resulting pointer is recorded in LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_setup_stack_frame_allocate_scratch( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, libxsmm_micro_kernel_config* i_micro_kernel_config ) { unsigned int gemm_scratch_size = 0; unsigned int scratch_pad_size = 0; int l_emu_amx = 0; const char *const l_env_emu_amx = getenv("EMULATE_AMX"); if ( 0 == l_env_emu_amx ) { } else { l_emu_amx = atoi(l_env_emu_amx); } if (l_emu_amx > 0) { int expand_scratch_factor = (i_micro_kernel_config->n_tiles == 1) ?
2 : 1; i_micro_kernel_config->emulation_scratch_offset = expand_scratch_factor * i_xgemm_desc->n * i_xgemm_desc->ldc * 4 /*i_micro_kernel_config->datatype_size*/; gemm_scratch_size = expand_scratch_factor * i_xgemm_desc->n * i_xgemm_desc->ldc * 4 /*i_micro_kernel_config->datatype_size*/ + 8 * 32 * 32 + 32 * 64 ; if (LIBXSMM_DATATYPE_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype )) { i_micro_kernel_config->emulation_scratch_offset = 0; gemm_scratch_size = 8 * 32 * 32 + 32 * 64 ; } } else { if ((io_generated_code->arch >= LIBXSMM_X86_AVX512_SPR)) { int expand_scratch_factor = (i_micro_kernel_config->n_tiles == 1) ? 2 : 1; gemm_scratch_size = LIBXSMM_MAX(32*64, expand_scratch_factor * i_xgemm_desc->n * i_xgemm_desc->ldc * 4/*i_micro_kernel_config->datatype_size*/); } else { /* Allocate scratch for stashing 32 zmms */ if ( ((LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI) ) { gemm_scratch_size = 32 * 64; } } }
/* Round the scratch size up to the next multiple of 64 bytes. */
scratch_pad_size = (gemm_scratch_size % 64 == 0) ?
0 : ((gemm_scratch_size + 63)/64) * 64 - gemm_scratch_size; gemm_scratch_size += scratch_pad_size; if (gemm_scratch_size > 0) { libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, LIBXSMM_X86_GP_REG_RSP, gemm_scratch_size ); libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR, LIBXSMM_X86_GP_REG_RSP ); } }
/* Builds the kernel's stack frame: saves RBP, points RBP at the current RSP,
 * reserves 88 bytes for the stack variables (layout in the diagrams below),
 * fills those variables according to the active ABI (v2 when the XGEMM or
 * XGEMM-EXT ABI flag is set, legacy otherwise), aligns RSP down to a 64-byte
 * boundary and finally allocates the GEMM scratch area. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_setup_stack_frame( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, libxsmm_micro_kernel_config* i_micro_kernel_config ) { unsigned int temp_reg = LIBXSMM_X86_GP_REG_R10; libxsmm_x86_instruction_push_reg( io_generated_code, LIBXSMM_X86_GP_REG_RBP ); libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_RBP); libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, LIBXSMM_X86_GP_REG_RSP, 88 ); if ( ((LIBXSMM_GEMM_FLAG_USE_XGEMM_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_ABI) || ((LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI) ) { libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars_v2( io_generated_code, i_xgemm_desc, i_micro_kernel_config, i_gp_reg_mapping ); } else { libxsmm_generator_gemm_setup_stack_frame_fill_stack_vars(io_generated_code, i_xgemm_desc, i_micro_kernel_config); } /* The stack now looks like this: * 10th param (if applicable) <-- RBP+80 * 9th param (if applicable) <-- RBP+72 * 8th param (if applicable) <-- RBP+64 * 7th param (if applicable) <-- RBP+56 * Return address <-- RBP+48 * Calle SAVED-regs <-- RBP[+8,+16,+24,+32,+40] * Entry/saved RBP <-- RBP * prefetch A ptr <-- RBP-8 * prefetch B ptr <-- RBP-16 * Offset A array ptr <-- RBP-24 * Offset B array ptr <-- RBP-32 * Int8 scaling factor <-- RBP-40 *
GEMM_scratch ptr in stack (to be filled) <-- RBP-48 * Eltwise bias ptr <-- RBP-56 * Eltwise output_ptr <-- RBP-64 * Eltwise buf1_ptr <-- RBP-72 * Eltwise buf2_ptr <-- RBP-80 * Batch-reduce count <-- RBP-88, RSP * */ /* Now align RSP to 64 byte boundary */ libxsmm_x86_instruction_alu_imm_i64( io_generated_code, i_micro_kernel_config->alu_mov_instruction, temp_reg, 0xFFFFFFFFFFFFFFC0 ); libxsmm_x86_instruction_alu_reg( io_generated_code, LIBXSMM_X86_INSTR_ANDQ, temp_reg, LIBXSMM_X86_GP_REG_RSP); /* Now alllocate in stack required GEMM scratch if necessary*/ libxsmm_generator_gemm_setup_stack_frame_allocate_scratch( io_generated_code, i_xgemm_desc, i_micro_kernel_config ); /* The stack at exit of setup looks like this: * * 10th param (if applicable) <-- RBP+80 * 9th param (if applicable) <-- RBP+72 * 8th param (if applicable) <-- RBP+64 * 7th param (if applicable) <-- RBP+56 * Return address <-- RBP+48 * Calle SAVED-regs <-- RBP[+8,+16,+24,+32,+40] * Entry/saved RBP <-- RBP * prefetch A ptr <-- RBP-8 * prefetch B ptr <-- RBP-16 * Offset A array ptr <-- RBP-24 * Offset B array ptr <-- RBP-32 * Int8 scaling factor <-- RBP-40 * GEMM_scratch ptr in stack <-- RBP-48 * Eltwise bias ptr <-- RBP-56 * Eltwise output_ptr <-- RBP-64 * Eltwise buf1_ptr <-- RBP-72 * Eltwise buf2_ptr <-- RBP-80 * Batch-reduce count <-- RBP-88, RSP * [ Potentianl pad for 64b align ] * GEMM scratch, 64b aligned <-- (RBP-48) contains this address * */ }
/* Tears down the frame built by libxsmm_generator_gemm_setup_stack_frame:
 * restores RSP from RBP and pops the saved RBP. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_destroy_stack_frame( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, const libxsmm_micro_kernel_config* i_micro_kernel_config ) { LIBXSMM_UNUSED(i_xgemm_desc); LIBXSMM_UNUSED(i_gp_reg_mapping); libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_RSP); libxsmm_x86_instruction_pop_reg( io_generated_code, LIBXSMM_X86_GP_REG_RBP ); }
/* Decodes the fusion-related descriptor fields (v2 / extended-ABI path) into
 * micro-kernel config flags: fused column-bias add (bf16 or f32), fused
 * ReLU / ReLU-bwd / sigmoid activation (with or without bitmask), VNNI output
 * conversion (possibly into an external buffer), sparse-A decompression with
 * its sparsity factor, and external norm-to-normT transform of B.  All flags
 * are reset to their defaults first; fused_eltwise is derived at the end as
 * the OR of the individual fusions. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(const libxsmm_gemm_descriptor* i_xgemm_desc, libxsmm_micro_kernel_config* i_micro_kernel_config ) { i_micro_kernel_config->fused_bcolbias = 0; i_micro_kernel_config->fused_scolbias = 0; i_micro_kernel_config->fused_relu = 0; i_micro_kernel_config->fused_relu_nobitmask = 0; i_micro_kernel_config->fused_relu_bwd = 0; i_micro_kernel_config->fused_sigmoid = 0; i_micro_kernel_config->overwrite_C = 1; i_micro_kernel_config->vnni_format_C = 0; i_micro_kernel_config->decompress_A = 0; i_micro_kernel_config->sparsity_factor_A = 1; i_micro_kernel_config->vnni_cvt_output_ext_buf = 0; i_micro_kernel_config->norm_to_normT_B_ext_buf = 0; i_micro_kernel_config->stride_b_trans = 0; i_micro_kernel_config->fused_eltwise = 0; i_micro_kernel_config->has_colbias_act_fused = 0; if ( ((LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI & i_xgemm_desc->flags) == LIBXSMM_GEMM_FLAG_USE_XGEMM_EXT_ABI) ) { i_micro_kernel_config->overwrite_C = ((i_xgemm_desc->internal_flags_2 & 0x4) > 0) ?
0 : 1; if (i_xgemm_desc->eltw_cp_op == LIBXSMM_MELTW_OPERATION_UNARY) { if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_RELU) { i_micro_kernel_config->has_colbias_act_fused = 1; if ((i_xgemm_desc->eltw_cp_flags & LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT) > 0){ i_micro_kernel_config->fused_relu = 1; } else { i_micro_kernel_config->fused_relu_nobitmask = 1; } } if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_SIGMOID) { i_micro_kernel_config->has_colbias_act_fused = 1; i_micro_kernel_config->fused_sigmoid = 1; } if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI) { i_micro_kernel_config->vnni_format_C = 1; if (i_micro_kernel_config->overwrite_C == 0) { i_micro_kernel_config->vnni_cvt_output_ext_buf = 1; } } if (i_xgemm_desc->eltw_cp_param == LIBXSMM_MELTW_TYPE_UNARY_RELU_INV) { i_micro_kernel_config->has_colbias_act_fused = 1; i_micro_kernel_config->fused_relu_bwd = 1; } } if (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_BINARY) { if (i_xgemm_desc->meltw_param == LIBXSMM_MELTW_TYPE_BINARY_ADD) { if (((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_0) > 0 ) || ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1) > 0 )) { i_micro_kernel_config->has_colbias_act_fused = 1; if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_BF16) { i_micro_kernel_config->fused_bcolbias = 1; } if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_F32) { i_micro_kernel_config->fused_scolbias = 1; } } } } if (i_xgemm_desc->eltw_ap_op == LIBXSMM_MELTW_OPERATION_UNARY) { if ((i_xgemm_desc->internal_flags_2 & 0x1) > 0){ if (i_xgemm_desc->eltw_ap_param == LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_1) { i_micro_kernel_config->decompress_A = 1; i_micro_kernel_config->sparsity_factor_A = 1; } if (i_xgemm_desc->eltw_ap_param == LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_2) { i_micro_kernel_config->decompress_A = 1; i_micro_kernel_config->sparsity_factor_A = 2; } if
(i_xgemm_desc->eltw_ap_param == LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_4) { i_micro_kernel_config->decompress_A = 1; i_micro_kernel_config->sparsity_factor_A = 4; } if (i_xgemm_desc->eltw_ap_param == LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_8) { i_micro_kernel_config->decompress_A = 1; i_micro_kernel_config->sparsity_factor_A = 8; } if (i_xgemm_desc->eltw_ap_param == LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_16) { i_micro_kernel_config->decompress_A = 1; i_micro_kernel_config->sparsity_factor_A = 16; } if (i_xgemm_desc->eltw_ap_param == LIBXSMM_MELTW_TYPE_UNARY_DECOMPRESS_SPARSE_FACTOR_32) { i_micro_kernel_config->decompress_A = 1; i_micro_kernel_config->sparsity_factor_A = 32; } } } if (i_xgemm_desc->eltw_bp_op == LIBXSMM_MELTW_OPERATION_UNARY) { if (i_xgemm_desc->eltw_bp_param == LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT) { if ((i_xgemm_desc->internal_flags_2 & 0x2) > 0){ i_micro_kernel_config->norm_to_normT_B_ext_buf = 1; i_micro_kernel_config->stride_b_trans = i_xgemm_desc->ldbp; } } }
/* fused_eltwise = OR of all individual fusions decoded above. */
i_micro_kernel_config->fused_eltwise = (i_micro_kernel_config->has_colbias_act_fused == 1) ?
1: 0; if (i_micro_kernel_config->decompress_A == 1) { i_micro_kernel_config->fused_eltwise = 1; } if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) { i_micro_kernel_config->fused_eltwise = 1; } if (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) { i_micro_kernel_config->fused_eltwise = 1; } if (i_micro_kernel_config->fused_relu_bwd == 1) { i_micro_kernel_config->fused_eltwise = 1; } } }
/* Legacy (non-v2) fusion decode, driven by meltw_operation/meltw_flags. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_setup_fusion_microkernel_properties(const libxsmm_gemm_descriptor* i_xgemm_desc, libxsmm_micro_kernel_config* i_micro_kernel_config ) { i_micro_kernel_config->fused_bcolbias = 0; i_micro_kernel_config->fused_scolbias = 0; i_micro_kernel_config->fused_relu = 0; i_micro_kernel_config->fused_relu_nobitmask = 0; i_micro_kernel_config->fused_relu_bwd = 0; i_micro_kernel_config->fused_sigmoid = 0; i_micro_kernel_config->overwrite_C = 0; i_micro_kernel_config->vnni_format_C = 0; i_micro_kernel_config->decompress_A = 0; i_micro_kernel_config->sparsity_factor_A = 1; i_micro_kernel_config->vnni_cvt_output_ext_buf = 0; i_micro_kernel_config->norm_to_normT_B_ext_buf = 0; i_micro_kernel_config->stride_b_trans = 0; i_micro_kernel_config->fused_eltwise = 0; i_micro_kernel_config->has_colbias_act_fused = 0; if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_DECOMPRESS_A) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_ACT_TRANSFORM_C_NORM_TO_VNNI_EXT_BUFFER)) { if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_OVERWRITE_C) > 0) { i_micro_kernel_config->overwrite_C = 1; } if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU) > 0) { i_micro_kernel_config->fused_relu = 1; } if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_NOBITMASK) > 0) { i_micro_kernel_config->fused_relu_nobitmask = 1; } if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_BWD) > 0) { i_micro_kernel_config->fused_relu_bwd = 1; } if
((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_SIGM) > 0) {
      i_micro_kernel_config->fused_sigmoid = 1;
    }
    /* Column-bias precision is selected via meltw_datatype_aux. */
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_COLBIAS) > 0) {
      if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_BF16) {
        i_micro_kernel_config->fused_bcolbias = 1;
      }
      if (i_xgemm_desc->meltw_datatype_aux == LIBXSMM_DATATYPE_F32) {
        i_micro_kernel_config->fused_scolbias = 1;
      }
    }
  } else {
    /* No colbias/act fusion requested: overwrite C, VNNI-C taken from the GEMM flags. */
    i_micro_kernel_config->overwrite_C = 1;
    i_micro_kernel_config->vnni_format_C = ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_C) > 0) ? 1 : 0;
  }
  /* Determine if we have to decompress A... */
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_DECOMPRESS_A) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_DECOMPRESS_A)) {
    i_micro_kernel_config->decompress_A = 1;
    /* In this encoding the sparsity factor is carried directly in meltw_param. */
    i_micro_kernel_config->sparsity_factor_A = i_xgemm_desc->meltw_param;
  }
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_DECOMPRESS_A)) {
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_BWD) > 0) {
      i_micro_kernel_config->fused_relu_bwd = 1;
    }
    i_micro_kernel_config->has_colbias_act_fused = 1;
    /* A COLBIAS_ACT operation with no flags set means nothing is actually fused. */
    if (i_xgemm_desc->meltw_flags == (unsigned int)LIBXSMM_MELTW_FLAG_NONE) {
      i_micro_kernel_config->has_colbias_act_fused = 0;
    }
  }
  /* C is converted norm -> VNNI into an external buffer. */
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_TRANSFORM_C_NORM_TO_VNNI_EXT_BUFFER) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_ACT_TRANSFORM_C_NORM_TO_VNNI_EXT_BUFFER)) {
    i_micro_kernel_config->vnni_cvt_output_ext_buf = 1;
    if ((i_xgemm_desc->meltw_flags & LIBXSMM_MELTW_FLAG_ACT_RELU_BWD) > 0) {
      i_micro_kernel_config->fused_relu_bwd = 1;
    }
  }
  /* B is transposed (norm -> normT) into an external buffer. */
  if ((i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_TRANSFORM_B_NORM_TO_NORMT_EXT_BUFFER) || (i_xgemm_desc->meltw_operation == LIBXSMM_MELTW_OPERATION_COLBIAS_ACT_TRANSFORM_B_NORM_TO_NORMT_EXT_BUFFER)) {
    i_micro_kernel_config->norm_to_normT_B_ext_buf = 1;
  }
  /* Summary flag: any fused eltwise work at all. */
  i_micro_kernel_config->fused_eltwise = (i_micro_kernel_config->has_colbias_act_fused == 1) ? 1: 0;
  if (i_micro_kernel_config->decompress_A == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
  }
  if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
  }
  if (i_micro_kernel_config->norm_to_normT_B_ext_buf == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
    /* Transpose stride comes from meltw_ldy in this encoding (ldbp in _v2). */
    i_micro_kernel_config->stride_b_trans = i_xgemm_desc->meltw_ldy;
  }
  if (i_micro_kernel_config->fused_relu_bwd == 1) {
    i_micro_kernel_config->fused_eltwise = 1;
  }
}

/* Map a symbolic stack variable to its RBP-relative byte offset in the frame
 * established by the kernel prologue. Returns 0 for unknown/NONE (callers use
 * 0 as the "illegal" sentinel). */
LIBXSMM_API_INTERN int libxsmm_generator_gemm_get_rbp_relative_offset( libxsmm_gemm_stack_var stack_var ) {
  /* The stack at exit of setup looks like this:
   *
   *      10th param (if applicable)                <-- RBP+40
   *      9th param (if applicable)                 <-- RBP+32
   *      8th param (if applicable)                 <-- RBP+24
   *      7th param (if applicable)                 <-- RBP+16
   *      Return address                            <-- RBP+8
   *      Entry/saved RBP                           <-- RBP
   *      prefetch A ptr                            <-- RBP-8
   *      prefetch B ptr                            <-- RBP-16
   *      Offset A array ptr                        <-- RBP-24
   *      Offset B array ptr                        <-- RBP-32
   *      Int8 scaling factor                       <-- RBP-40
   *      GEMM_scratch ptr in stack (to be filled)  <-- RBP-48
   *      Eltwise bias ptr                          <-- RBP-56
   *      Eltwise output_ptr                        <-- RBP-64
   *      Eltwise buf1_ptr                          <-- RBP-72
   *      Eltwise buf2_ptr                          <-- RBP-80
   *      Batch-reduce count                        <-- RBP-88
   */
  switch ( stack_var ) {
    case LIBXSMM_GEMM_STACK_VAR_NONE:
      return 0;
    case LIBXSMM_GEMM_STACK_VAR_PFA_PTR:
      return -8;
    case LIBXSMM_GEMM_STACK_VAR_PFB_PTR:
      return -16;
    case LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR:
      return -24;
    case LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR:
      return -32;
    case LIBXSMM_GEMM_STACK_VAR_INT8_SCF:
      return -40;
    case LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR:
      return -48;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR:
      return -56;
    /* Slots -72/-80 are shared by mutually exclusive features (see aliases below). */
    case LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR:
      return -64;
    case LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BUF1:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BUF2:
      return -80;
    case LIBXSMM_GEMM_STACK_VAR_BRCOUNT:
      return
-88;
    /* Aliases into the shared -72/-80 slots (mutually exclusive with ELT_BUF1/2). */
    case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_C:
      return -80;
    case LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR:
      return -72;
    case LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF:
      return -80;
    /* Positive offsets reach the caller's stack-passed arguments.
     * NOTE(review): the frame sketch in the function header lists the 7th..10th
     * params at RBP+16..+40, while these cases return +56..+80 -- presumably
     * additional pushes happen before RBP is captured; verify against the
     * prologue emitter. */
    case LIBXSMM_GEMM_STACK_VAR_ARG_7:
      return 56;
    case LIBXSMM_GEMM_STACK_VAR_ARG_8:
      return 64;
    case LIBXSMM_GEMM_STACK_VAR_ARG_9:
      return 72;
    case LIBXSMM_GEMM_STACK_VAR_ARG_10:
      return 80;
    default:
      return 0;
  }
}

/* Emit a load of the given stack variable into GP register i_gp_reg.
 * Emits LIBXSMM_ERR_GENERAL when the stack variable has no assigned slot
 * (offset 0, i.e. NONE/unknown). */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_getval_stack_var( libxsmm_generated_code*             io_generated_code,
                                                                 const libxsmm_micro_kernel_config*  i_micro_kernel_config,
                                                                 libxsmm_gemm_stack_var              stack_var,
                                                                 unsigned int                        i_gp_reg ) {
  int offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
  /* make sure we requested a legal stack var */
  if (offset == 0) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
    return;
  }
  /* MOV i_gp_reg <- [RBP + offset] (last argument 0 == load). */
  libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, offset, i_gp_reg, 0 );
}

/* Emit a store of GP register i_gp_reg into the given stack variable.
 * Only negative offsets (locals) are writable; positive offsets (caller
 * arguments) and 0 are rejected with LIBXSMM_ERR_GENERAL. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_setval_stack_var( libxsmm_generated_code*             io_generated_code,
                                                                 const libxsmm_micro_kernel_config*  i_micro_kernel_config,
                                                                 libxsmm_gemm_stack_var              stack_var,
                                                                 unsigned int                        i_gp_reg ) {
  int offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
  /* make sure we requested to set a legal stack var */
  if (offset >= 0) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
    return;
  }
  /* MOV [RBP + offset] <- i_gp_reg (last argument 1 == store). */
  libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, offset, i_gp_reg, 1 );
}

/* Initialize the full-vector-width micro-kernel configuration (instruction
 * selection, vector length, datatype sizes) for the given target architecture
 * and GEMM descriptor. i_use_masking_a_c requests masked A/C accesses. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config*    io_micro_kernel_config,
                                                                                    const unsigned int              i_arch,
                                                                                    const libxsmm_gemm_descriptor*  i_xgemm_desc,
                                                                                    const unsigned int              i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /*
avoid warning "maybe used uninitialized" */
  /* Decode fused-eltwise properties from the descriptor first. */
  libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(i_xgemm_desc, io_micro_kernel_config);
  if ( (i_arch <= LIBXSMM_TARGET_ARCH_GENERIC) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    /* Unknown or generic target: neutral config, all instructions undefined. */
    io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size_in = 0;
    io_micro_kernel_config->datatype_size_out = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
    /* SSE targets: xmm registers, 16 available. */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* FP64: 2 doubles per xmm. */
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      /* On the baseline x86 target MOVDDUP is not used; broadcast B via MOVSD+SHUFPD. */
      if ( i_arch == LIBXSMM_X86_GENERIC ) {
        io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
        io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPD;
      } else {
        io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
        io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
    } else {
      /* FP32: 4 floats per xmm. */
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
    }
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    /* AVX/AVX2 targets: ymm registers, 16 available. */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      /* AVX1 has no FMA; AVX2 path uses FMA3. */
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      }
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      }
    }
  } else if ( i_arch < LIBXSMM_X86_AVX512) {
    /* AVX512-capable core running ymm-width kernels: 32 registers, name 'y'. */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        /* When masking C, the non-temporal slot falls back to a regular aligned store. */
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else if ( LIBXSMM_DATATYPE_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* NOTE(review): this else re-assigns c_vmove_instruction (already VMOVAPS)
           * and leaves c_vmove_nts_instruction at 0 from the memset; the F64 branch
           * assigns the NTS field here instead -- looks like a copy/paste slip,
           * verify intended behavior. */
          io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else if ( LIBXSMM_DATATYPE_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 4;
      if ( LIBXSMM_DATATYPE_I16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
        io_micro_kernel_config->datatype_size_out = 2;
      } else {
        io_micro_kernel_config->datatype_size_out = 4;
      }
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* NOTE(review): c_vmove_nts_instruction left unset in the masked path
           * (same pattern as the F32 branch); verify. */
          io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPWSSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 4;
      if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
        io_micro_kernel_config->datatype_size_out = 1;
      } else {
        io_micro_kernel_config->datatype_size_out = 4;
      }
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* NOTE(review): NTS slot left unset in the masked path; verify. */
          io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPBUSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_A) > 0) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 4;
      if ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
        io_micro_kernel_config->datatype_size_out = 2;
      } else {
        io_micro_kernel_config->datatype_size_out = 4;
      }
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* NOTE(review): NTS slot left unset in the masked path; verify. */
          io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else if ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_A) == 0) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      /* BF16 A not in VNNI layout: 2-byte inputs, FMA on up-converted values. */
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 2;
      if ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
        io_micro_kernel_config->datatype_size_out = 2;
      } else {
        io_micro_kernel_config->datatype_size_out = 4;
      }
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTW;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* NOTE(review): NTS slot left unset in the masked path; verify. */
          io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else {
      /* shouldn't happen as we caught this case earlier */
      io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
      io_micro_kernel_config->vector_reg_count = 0;
      io_micro_kernel_config->use_masking_a_c = 0;
      io_micro_kernel_config->vector_name = 'a';
      io_micro_kernel_config->vector_length = 0;
      io_micro_kernel_config->datatype_size_in = 0;
      io_micro_kernel_config->datatype_size_out = 0;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* Full AVX512 targets: zmm registers, 32 available. */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'z';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        /* When masking C, the non-temporal slot falls back to a regular aligned store. */
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD; io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD; } else if ( LIBXSMM_DATATYPE_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { io_micro_kernel_config->vector_length = 16; io_micro_kernel_config->datatype_size_in = 4; io_micro_kernel_config->datatype_size_out = 4; if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } else { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS; io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF; if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; if ( (i_use_masking_a_c == 0) ) { io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS; } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD; io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS; io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS; } else if ( LIBXSMM_DATATYPE_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */ io_micro_kernel_config->vector_length = 16; io_micro_kernel_config->datatype_size_in = 4; if ( LIBXSMM_DATATYPE_I16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) { io_micro_kernel_config->datatype_size_out = 2; } else { io_micro_kernel_config->datatype_size_out = 4; } if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->a_vmove_instruction = 
LIBXSMM_X86_INSTR_VMOVAPS; } else { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD; io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF; if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; if ( (i_use_masking_a_c == 0) ) { io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS; } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD; io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPWSSD; io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD; } else if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */ io_micro_kernel_config->vector_length = 16; io_micro_kernel_config->datatype_size_in = 4; if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) { io_micro_kernel_config->datatype_size_out = 1; } else { io_micro_kernel_config->datatype_size_out = 4; } if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } else { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD; io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF; if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; if ( (i_use_masking_a_c == 0) ) { io_micro_kernel_config->c_vmove_nts_instruction = 
LIBXSMM_X86_INSTR_VMOVNTPS; } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD; io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPBUSD; io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD; } else if ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_A) > 0) ) { /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */ io_micro_kernel_config->vector_length = 16; io_micro_kernel_config->datatype_size_in = 4; if ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) { io_micro_kernel_config->datatype_size_out = 2; } else { io_micro_kernel_config->datatype_size_out = 4; } if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } else { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD; io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF; if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; if ( (i_use_masking_a_c == 0) ) { io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS; } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD; io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS; 
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS; } else if ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) && ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_VNNI_A) == 0) ) { /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */ io_micro_kernel_config->vector_length = 16; io_micro_kernel_config->datatype_size_in = 2; if ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) { io_micro_kernel_config->datatype_size_out = 2; } else { io_micro_kernel_config->datatype_size_out = 4; } if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } else { io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTW; io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF; if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; if ( (i_use_masking_a_c == 0) ) { io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS; } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS; } } else { io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS; io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS; } io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD; io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS; io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS; } else { /* shouldn't happen as we caught this case earlier */ io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC; io_micro_kernel_config->vector_reg_count = 0; io_micro_kernel_config->use_masking_a_c = 0; io_micro_kernel_config->vector_name = 'a'; io_micro_kernel_config->vector_length = 0; io_micro_kernel_config->datatype_size_in = 0; 
io_micro_kernel_config->datatype_size_out = 0;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else {
    /* should not happen */
  }
  /* scalar ALU/branch helper opcodes are identical for every datatype path */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

/**
 * Populates the micro-kernel configuration for half-vector (xmm) code
 * generation: selects load/store, broadcast, xor and multiply/add opcodes
 * per architecture and input datatype.  SSE targets are redirected to the
 * scalar config, AVX512-class targets to the fullvector config.
 */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
    const unsigned int i_arch, const libxsmm_gemm_descriptor* i_xgemm_desc, const unsigned int i_use_masking_a_c ) {
  libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(i_xgemm_desc, io_micro_kernel_config);
  if ( (i_arch <= LIBXSMM_TARGET_ARCH_GENERIC) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    /* unsupported architecture: leave every instruction slot undefined */
    io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size_in = 0;
    io_micro_kernel_config->datatype_size_out = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
    /* SSE has no half-vector path here: fall back to the scalar config */
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* FP64: 2 elements per xmm register */
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      /* aligned vs. unaligned A loads depending on descriptor flags */
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      /* plain AVX has no FMA; use mul+add pair instead of VFMADD231 */
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      /* FP32: 4 elements per xmm register */
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* AVX512-class targets use the full-vector configuration instead */
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else {
    /* should not happen */
  }
  /* scalar ALU/branch helper opcodes, shared by all paths above */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

/**
 * Populates the micro-kernel configuration for scalar (one element per
 * register) code generation, for SSE targets (MOVS*/MULS*/ADDS*) and for
 * AVX and newer (VMOVS*, VMULS*/VADDS* or VFMADD231S*).
 */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
    const unsigned int i_arch, const libxsmm_gemm_descriptor* i_xgemm_desc, const unsigned int i_use_masking_a_c ) {
  libxsmm_generator_gemm_setup_fusion_microkernel_properties_v2(i_xgemm_desc, io_micro_kernel_config);
  if ( ( i_arch <= LIBXSMM_TARGET_ARCH_GENERIC ) || ( i_arch > LIBXSMM_X86_ALLFEAT ) ) {
    /* unsupported architecture: leave every instruction slot undefined */
    io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size_in = 0;
    io_micro_kernel_config->datatype_size_out = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP(
i_xgemm_desc->datatype ) ) {
      /* SSE scalar FP64 path */
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
    } else {
      /* SSE scalar FP32 path */
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* AVX and newer: VEX-encoded scalar instructions */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size_in = 8;
      io_micro_kernel_config->datatype_size_out = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      /* plain AVX lacks FMA: use mul+add pair */
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size_in = 4;
      io_micro_kernel_config->datatype_size_out = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else {
    /* should not happen */
  }
  /* scalar ALU/branch helper opcodes, shared by all paths above */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

/**
 * When generating C source (code_type == 0), appends a debug-only snippet
 * that atomically accumulates 2*m*n*k into libxsmm_num_total_flops.
 * No-op for binary (JIT) code generation.
 */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
    const libxsmm_gemm_descriptor* i_xgemm_desc ) {
  if ( io_generated_code->code_type == 0 ) {
    char l_new_code[512];
    const unsigned int l_max_code_length = sizeof(l_new_code) - 1;
    int l_code_length = 0;
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifndef NDEBUG\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifdef _OPENMP\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#pragma omp atomic\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}

/* Emits the K-loop head: zero the K counter, place the back-jump label and
 * advance the counter by the K blocking factor per iteration. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int i_m_blocking, const unsigned int i_k_blocking ) {
  LIBXSMM_UNUSED(i_m_blocking);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}

/* Emits the K-loop tail: compare/branch back, and once the K loop is fully
 * unrolled/complete, rewind the B pointer by one panel. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config, const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_m_blocking, const unsigned int i_max_blocked_k, const unsigned int i_kloop_complete ) {
  LIBXSMM_UNUSED(i_m_blocking);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
  if ( i_kloop_complete != 0 ) {
    /* B advance differs for transposed B (row- vs column-major panel) */
    int l_b_offset = 0;
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
      l_b_offset = i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size_in;
    } else {
      l_b_offset = i_xgemm_desc->k * i_micro_kernel_config->datatype_size_in;
    }
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_b, l_b_offset );
  }
}

/* Emits the batch-reduce loop head: zero the reduce counter, place label. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 0);
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
}

/* Emits the batch-reduce loop tail: increment counter, compare against the
 * reduce count register and branch back. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config, const libxsmm_gemm_descriptor* i_xgemm_desc) {
  LIBXSMM_UNUSED(i_xgemm_desc);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 1);
  libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_reduce_count, i_gp_reg_mapping->gp_reg_reduce_loop);
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}

/* Emits the N-loop head: initialize the N counter, place the back-jump label
 * and advance by the N blocking factor per iteration. */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int i_n_init, const unsigned int i_n_blocking) {
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_init );
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
}

/**
 * Emits the N-loop tail: advances the C pointer (datatype-dependent element
 * size), adjusts all fused-eltwise stack pointers, handles B prefetch and
 * the batch-reduce address arrays, then compares/branches back.
 */
LIBXSMM_API_INTERN void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
    libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
    const libxsmm_micro_kernel_config* i_micro_kernel_config, const libxsmm_gemm_descriptor* i_xgemm_desc,
    const unsigned int i_n_blocking, const unsigned int i_n_done ) {
  if ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    /* BF16 output: 2 bytes per element; VNNI layout doubles the row stride */
    if (i_micro_kernel_config->vnni_format_C == 0) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*2 /*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
    } else {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
          (i_n_blocking*(i_xgemm_desc->ldc)*2 /*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
    }
  } else if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    /* I8 output: 1 byte per element */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
        (i_n_blocking*(i_xgemm_desc->ldc)/**(i_micro_kernel_config->datatype_size/4)*/) - ((i_xgemm_desc->m) /** (i_micro_kernel_config->datatype_size/4)*/) );
  } else {
    /* generic path: scale by output datatype size */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
        (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size_out)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_out)) );
  }
  /* Also adjust eltwise pointers */
  if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) || (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
    /* help_0 is used as scratch below; preserve its value */
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  }
  if ((i_micro_kernel_config->fused_relu == 1) && (i_micro_kernel_config->overwrite_C == 1) ) {
    /* ReLU bitmask: one bit per element, hence the /8 scaling */
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0,
        (i_n_blocking*i_xgemm_desc->ldc)/8 - ((i_xgemm_desc->m/8) ) );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
    /* external VNNI output buffer: BF16 elements, VNNI row-pair stride */
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0,
        (i_n_blocking*(i_xgemm_desc->ldc)*2/*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  if (i_micro_kernel_config->fused_relu_bwd == 1) {
    /* ReLU backward bitmask pointer: one bit per element */
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0,
        (i_n_blocking*i_xgemm_desc->ldc)/8 - ((i_xgemm_desc->m/8) ) );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* In this case also advance the output ptr */
  if (i_micro_kernel_config->overwrite_C == 0) {
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0,
        (i_n_blocking*(i_xgemm_desc->ldc)*2/*(i_micro_kernel_config->datatype_size/2)*/) - ((i_xgemm_desc->m) * 2 /*(i_micro_kernel_config->datatype_size/2)*/) );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  if (i_micro_kernel_config->fused_bcolbias == 1) {
    /* rewind BF16 column-bias pointer to the start of the column block */
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
        ( i_xgemm_desc->m * 2/*(i_micro_kernel_config->datatype_size/2)*/) );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  if (i_micro_kernel_config->fused_scolbias == 1) {
    /* rewind FP32 column-bias pointer to the start of the column block */
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
        ( i_xgemm_desc->m * 4/*i_micro_kernel_config->datatype_size*/) );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) || (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* B prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch,
          (i_n_blocking*(i_xgemm_desc->ldc)*i_micro_kernel_config->datatype_size_in) - ((i_xgemm_desc->m)*i_micro_kernel_config->datatype_size_in) );
    }
  }
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
        (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size_out)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_out)) );
  }
#endif
  if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
    /* batch-reduce address mode: patch every A/B pointer in the address
     * arrays (stride 8 = pointer size) via a generated reduce loop */
    /* handle trans B */
    int l_b_offset = 0;
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
      l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size_in;
    } else {
      l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size_in;
    }
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
    /* load A[i], rewind by the finished M extent, store back */
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    /* load B[i], advance by the N-block offset, store back */
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_b, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0,
        i_gp_reg_mapping->gp_reg_help_0, 0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, l_b_offset );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_b, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
      /* keep the A-prefetch address array in sync with A */
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) );
      libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    }
    libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  } else {
    /* handle trans B */
    int l_b_offset = 0;
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
      l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size_in;
    } else {
      l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size_in;
    }
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b, l_b_offset );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) ); if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) { libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size_in)) ); } } libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done ); libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker ); } LIBXSMM_API_INTERN void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code, libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, const libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int i_m_init, const unsigned int i_m_blocking ) { libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_init ); libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker ); libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking ); } LIBXSMM_API_INTERN void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code, libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, const libxsmm_micro_kernel_config* i_micro_kernel_config, const libxsmm_gemm_descriptor* i_xgemm_desc, const unsigned int i_m_blocking, const unsigned int i_m_done ) { /* advance C pointer */ libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size_out) ); /* Also adjust 
eltwise pointers */
  /* spill gp_reg_help_0: it is used below as scratch to advance the fused
     eltwise/bias/external-output pointers held in stack variables */
  if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) ||
      (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* fused ReLU writing into C: advance the eltwise output pointer by i_m_blocking/8 bytes
     (presumably the ReLU bitmask, one bit per element -- verify against the store path) */
  if ((i_micro_kernel_config->fused_relu == 1) && (i_micro_kernel_config->overwrite_C == 1) ) {
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking/8 );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* VNNI-converted output in an external buffer: advance its pointer by i_m_blocking*2*2 bytes */
  if (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) {
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking*2*2/*(i_micro_kernel_config->datatype_size/2)*/ );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* backward ReLU: advance the ReLU bitmask pointer by i_m_blocking/8 bytes */
  if (i_micro_kernel_config->fused_relu_bwd == 1) {
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking/8 );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* C is not overwritten: advance the separate output pointer by i_m_blocking*2 bytes */
  if (i_micro_kernel_config->overwrite_C == 0) {
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking*2/*(i_micro_kernel_config->datatype_size/2)*/ );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* fused BF16 column bias: advance the bias pointer (2 bytes per element) */
  if (i_micro_kernel_config->fused_bcolbias == 1) {
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking * 2/*(i_micro_kernel_config->datatype_size/2)*/ );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* fused FP32 column bias: advance the bias pointer (4 bytes per element) */
  if (i_micro_kernel_config->fused_scolbias == 1) {
    libxsmm_generator_gemm_getval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, i_m_blocking * 4 /*i_micro_kernel_config->datatype_size*/ );
    libxsmm_generator_gemm_setval_stack_var( io_generated_code, i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR, i_gp_reg_mapping->gp_reg_help_0 );
  }
  /* restore the helper register spilled above (same condition as the push) */
  if ((i_micro_kernel_config->fused_relu == 1) || (i_micro_kernel_config->vnni_cvt_output_ext_buf == 1) || (i_micro_kernel_config->fused_relu_bwd == 1) ||
      (i_micro_kernel_config->fused_bcolbias == 1) || (i_micro_kernel_config->fused_scolbias == 1) || (i_micro_kernel_config->overwrite_C == 0)) {
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  }

  /* C prefetch */
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                     i_gp_reg_mapping->gp_reg_c_prefetch,
                                     i_m_blocking*(i_micro_kernel_config->datatype_size_out) );
  }
#endif

  /* B prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
                                       i_gp_reg_mapping->gp_reg_b_prefetch,
                                       i_m_blocking*i_micro_kernel_config->datatype_size_in );
    }
  }

  /* A prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
    if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
      /* batch-reduce via address list: rewind every A-prefetch pointer in the array */
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
        libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
        libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
        libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
        /* load entry i of the A-prefetch address array, rewind it, store it back */
        libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                         i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0,
                                         i_gp_reg_mapping->gp_reg_help_0, 0 );
        libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                         i_gp_reg_mapping->gp_reg_help_0,
                                         ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
        libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                         i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0,
                                         i_gp_reg_mapping->gp_reg_help_0, 1 );
        libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
        libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
        libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
      }
    } else {
      /* plain kernel: rewind the single A-prefetch pointer */
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                       i_gp_reg_mapping->gp_reg_a_prefetch,
                                       ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
    }
  }

  /* advance A pointer */
  if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
    /* batch-reduce via address list: rewind every A pointer in the array */
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                     i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0,
                                     i_gp_reg_mapping->gp_reg_help_0, 0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                     i_gp_reg_mapping->gp_reg_help_0,
                                     ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                     i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0,
                                     i_gp_reg_mapping->gp_reg_help_0, 1 );
    libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  } else {
    /* plain kernel: rewind A by k*lda minus the already-consumed m-block */
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                     i_gp_reg_mapping->gp_reg_a,
                                     ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size_in) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size_in)) );
  }

  /* loop handling */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}

/**
 * Loads (beta=1) or zero-initializes (beta=0) the accumulator register block
 * for C; BF16 and int8 outputs are up-converted to 32-bit lanes on load, and
 * fused column-bias handling is applied when configured.
 *
 * @param io_generated_code     buffer that receives the generated instructions.
 * @param i_gp_reg_mapping      GP-register mapping (gp_reg_c is read).
 * @param i_micro_kernel_config micro-kernel configuration (ISA, vector length, fusion flags).
 * @param i_xgemm_desc          GEMM descriptor (flags, datatype, ldc).
 * @param i_m_blocking          m extent of the register block (elements).
 * @param i_n_blocking          n extent of the register block (columns).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code*            io_generated_code,
                                    const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                    const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                    const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                    const unsigned int                 i_m_blocking,
                                    const unsigned int                 i_n_blocking ) {
  unsigned int l_m_blocking, l_vec_reg_acc_start;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;

  assert(0 < i_micro_kernel_config->vector_length);
  /* deriving register blocking from kernel config */
  l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
  /* start register of accumulator */
  l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);

#if !defined(NDEBUG)
  /* Do some test if it is possible to generate the requested code. This is not done in release mode and therefore bad things might happen....
HUAAH */
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_GENERIC ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE42 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
    /* pre-AVX-512 ISAs: at most 3 accumulator columns */
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256 ||
              i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX ||
              i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX) {
    if ( (i_n_blocking > 28) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 8) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
    /* pre-CORE AVX-512: a single m register only */
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking != 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
#if 0
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif
#endif /*!defined(NDEBUG)*/

  /* load C accumulator */
  if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
    /* pure BF16 kernel */
    if ( ( ((i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT)) ||
           ((i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 )) ) &&
         ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
      /* we add when scaling during conversion to FP32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 16 bit values into xmm portion of the register */
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            libxsmm_x86_instruction_vec_move( io_generated_code,
                                              i_micro_kernel_config->instruction_set,
                                              LIBXSMM_X86_INSTR_VMOVDQU16,
                                              i_gp_reg_mapping->gp_reg_c,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                                              ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z',
                                              0, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code,
                                              i_micro_kernel_config->instruction_set,
                                              i_micro_kernel_config->c_vmove_instruction,
                                              i_gp_reg_mapping->gp_reg_c,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                                              ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'x' : 'y',
                                              0, 0, 1, 0 );
          }
          /* convert 16 bit values into 32 bit (integer convert) */
          libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPMOVSXWD,
                                                    i_micro_kernel_config->vector_name,
                                                    0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
          /* shift 16 bits to the left to generate valid FP32 numbers */
          libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code, LIBXSMM_X86_INSTR_VPSLLD_I,
                                                        i_micro_kernel_config->vector_name,
                                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), 16);
        }
      }
      /* Check if we have to add bias */
      if (i_micro_kernel_config->fused_bcolbias == 1) {
        libxsmm_generator_gemm_add_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
                                                        LIBXSMM_DATATYPE_BF16, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
      }
    /* pure int8 kernel */
    } else if ( ( ((i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT)) ||
                  ((i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ) &&
                ( (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
      /* we need to up convert int8 to int32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 16 bit values into xmm portion of the register*/
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            libxsmm_x86_instruction_vec_move( io_generated_code,
                                              i_micro_kernel_config->instruction_set,
                                              LIBXSMM_X86_INSTR_VMOVDQU8,
                                              i_gp_reg_mapping->gp_reg_c,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                                              ( ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX2) && ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512 ) ) ? 'y' : 'z',
                                              0, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code,
                                              i_micro_kernel_config->instruction_set,
                                              i_micro_kernel_config->c_vmove_instruction,
                                              i_gp_reg_mapping->gp_reg_c,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                                              'x', 0, 0, 1, 0 );
          }
          /* convert 8 bit values into 32 bit (integer convert) */
          if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED) != 0 ) {
            libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPMOVZXBD,
                                                      i_micro_kernel_config->vector_name,
                                                      0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
          } else {
            libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPMOVSXBD,
                                                      i_micro_kernel_config->vector_name,
                                                      0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
          }
        }
      }
    } else {
      /* adding to C, so let's load C */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* we only mask the last m-blocked load */
          libxsmm_x86_instruction_vec_move( io_generated_code,
                                            i_micro_kernel_config->instruction_set,
                                            i_micro_kernel_config->c_vmove_instruction,
                                            i_gp_reg_mapping->gp_reg_c,
                                            LIBXSMM_X86_GP_REG_UNDEF, 0,
                                            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                                            i_micro_kernel_config->vector_name,
                                            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                            ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
        }
#if 0
        /* NOTE(review): dead code; the "l_m += l_m++" increment is broken (UB) if ever enabled */
        if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
             i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                                              i_micro_kernel_config->prefetch_instruction,
                                              i_gp_reg_mapping->gp_reg_c_prefetch,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out));
          }
        }
#endif
      }
      /* Check if we have to add bias */
      if (i_micro_kernel_config->fused_scolbias == 1) {
        libxsmm_generator_gemm_add_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
                                                        LIBXSMM_DATATYPE_F32, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
      }
    }
  } else {
    /* Beta=0: seed the accumulators with the column bias, or zero them */
    if (i_micro_kernel_config->fused_scolbias == 1) {
      libxsmm_generator_gemm_load_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
                                                       LIBXSMM_DATATYPE_F32, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
    } else if (i_micro_kernel_config->fused_bcolbias == 1) {
      libxsmm_generator_gemm_load_colbias_to_2D_block( io_generated_code, i_gp_reg_mapping, i_micro_kernel_config,
                                                       LIBXSMM_DATATYPE_BF16, l_vec_reg_acc_start, l_m_blocking, i_n_blocking );
    } else {
      /* overwriting C, so let's xout accumulator */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* @TODO: cannot migrate to new encoder as this is also SSE */
          if ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) && LIBXSMM_DATATYPE_I32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype )){
            libxsmm_x86_instruction_vec_move( io_generated_code,
                                              i_micro_kernel_config->instruction_set,
                                              i_micro_kernel_config->c_vmove_instruction,
                                              i_gp_reg_mapping->gp_reg_c,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out),
                                              i_micro_kernel_config->vector_name,
                                              l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), 0, 1, 0 );
          } else {
            /* xor the accumulator register with itself to zero it */
            if ( io_generated_code->arch >= LIBXSMM_X86_AVX ) {
              libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, i_micro_kernel_config->vxor_instruction,
                                                        i_micro_kernel_config->vector_name,
                                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
            } else {
              libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, i_micro_kernel_config->vxor_instruction,
                                                        i_micro_kernel_config->vector_name,
                                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                        l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
            }
          }
        }
#if 0
        if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
             i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                                              i_micro_kernel_config->prefetch_instruction,
                                              i_gp_reg_mapping->gp_reg_c_prefetch,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out));
          }
        }
#endif
      }
    }
  }
}

/**
 * Stores the accumulator register block back to C, applying any configured
 * down-conversion (e.g. FP32 -> BF16/int8) and fused eltwise operations
 * (ReLU, sigmoid, column bias) on the way out.
 *
 * @param io_generated_code     buffer that receives the generated instructions.
 * @param i_gp_reg_mapping      GP-register mapping (gp_reg_c is written).
 * @param i_micro_kernel_config micro-kernel configuration (ISA, vector length, fusion flags).
 * @param i_xgemm_desc          GEMM descriptor (flags, datatype, ldc).
 * @param i_m_blocking          m extent of the register block (elements).
 * @param i_n_blocking          n extent of the register block (columns).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code*            io_generated_code,
                                     const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                     const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                     const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                     const unsigned int                 i_m_blocking,
                                     const unsigned int                 i_n_blocking ) {
  /* deriving register blocking from kernel config */
  unsigned int l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ?
i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1; /* register blocking counter in n */ unsigned int l_n = 0; /* register blocking counter in m */ unsigned int l_m = 0; /* start register of accumulator */ unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking); /* select store instruction */ unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags)) ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction; libxsmm_micro_kernel_config l_micro_kernel_config_mod; libxsmm_micro_kernel_config *i_micro_kernel_config_mod = (libxsmm_micro_kernel_config*) &l_micro_kernel_config_mod; memcpy(i_micro_kernel_config_mod, i_micro_kernel_config, sizeof(libxsmm_micro_kernel_config)); /* @TODO fix this test */ #if !defined(NDEBUG) if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_GENERIC || i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 || i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE42 || i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX || i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) { if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK ); return; } } else if ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256 || i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX || i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX) { if ( (i_n_blocking > 28) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 8) ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK ); return; } } else if ( i_micro_kernel_config->instruction_set > LIBXSMM_X86_AVX512_VL256 ) { if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) { LIBXSMM_HANDLE_ERROR( 
io_generated_code, LIBXSMM_ERR_REG_BLOCK ); return; } } else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_VL256 ) { if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK ); return; } } else {} #if 0 if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK ); return; } #endif #endif if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256) ) && ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { const unsigned int relu_bitmask_gpr = i_gp_reg_mapping->gp_reg_help_2; const unsigned int scratch_gpr = i_gp_reg_mapping->gp_reg_help_2; const unsigned int aux_gpr = i_gp_reg_mapping->gp_reg_help_1; const unsigned int aux_vreg = 1; const unsigned int zero_vreg = 0; /* Check out if fusion has to be applied */ if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { libxsmm_generator_gemm_prepare_relu_fusion( io_generated_code, i_micro_kernel_config, zero_vreg, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr); } else if (i_micro_kernel_config->fused_sigmoid == 1) { libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( io_generated_code, i_micro_kernel_config_mod, l_vec_reg_acc_start, l_m_blocking, i_n_blocking, scratch_gpr, aux_gpr); } for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { unsigned int bitmask_offset = 
(io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8; libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config, zero_vreg, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg); } else if (i_micro_kernel_config->fused_sigmoid == 1) { unsigned int tmp_vreg = 0; libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod, scratch_gpr, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), tmp_vreg ); /* Store vreg back to scratch */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVUPS, scratch_gpr, LIBXSMM_X86_GP_REG_UNDEF, 0, (l_vec_reg_acc_start + l_m + (l_m_blocking * l_n)) * 64, i_micro_kernel_config->vector_name, tmp_vreg, 0, 0, 1 ); } } } if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { libxsmm_generator_gemm_cleanup_relu_fusion( io_generated_code, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr); } else if (i_micro_kernel_config->fused_sigmoid == 1) { /* Restore accumulators from scratch */ libxsmm_generator_gemm_restore_2D_regblock_from_scratch( io_generated_code, i_micro_kernel_config, scratch_gpr, l_vec_reg_acc_start, l_m_blocking, i_n_blocking); libxsmm_generator_gemm_cleanup_sigmoid_fusion( io_generated_code, scratch_gpr, aux_gpr ); } /* init stack with helper variables for SW-based RNE rounding */ /* push 0x7f800000 on the stack, naninf masking */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x7f800000); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); /* push 0x00010000 on the stack, fixup masking */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, 
i_gp_reg_mapping->gp_reg_help_2, 0x00010000); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); /* push 0x00007fff on the stack, rneadd */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00007fff); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); /* push 0x00000001 on the stack, fixup */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00000001); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); /* storing downconverted and rounded C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); /* and with naninf */ libxsmm_x86_instruction_vec_compute_mem_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPANDD, i_micro_kernel_config->vector_name, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 24, 1, reg_X, 0 ); /* and with fixup */ libxsmm_x86_instruction_vec_compute_mem_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPANDD, i_micro_kernel_config->vector_name, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 16, 1, reg_X, 1 ); /* compute naninf mask k7 */ libxsmm_x86_instruction_vec_compute_mem_2reg_imm8( io_generated_code, LIBXSMM_X86_INSTR_VPCMPD, i_micro_kernel_config->vector_name, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 24, 1, 0, 7, 4 ); /* compute fixup mask k6 */ libxsmm_x86_instruction_vec_compute_mem_2reg_imm8( io_generated_code, LIBXSMM_X86_INSTR_VPCMPD, i_micro_kernel_config->vector_name, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 16, 1, 1, 6, 0 ); /* load rneadd */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 8, i_micro_kernel_config->vector_name, 0, 0, 
1, 0 ); /* load fixup */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, LIBXSMM_X86_GP_REG_RSP, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, i_micro_kernel_config->vector_name, 1, 0, 1, 0 ); /* compute fixup */ libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code, LIBXSMM_X86_INSTR_VPADDD, i_micro_kernel_config->vector_name, 1, 0, 0, 6, 0 ); /* compute fixup */ libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code, LIBXSMM_X86_INSTR_VPADDD, i_micro_kernel_config->vector_name, 0, reg_X, reg_X, 7, 0 ); /* shift FP32 by 16bit to right */ libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code, LIBXSMM_X86_INSTR_VPSRAD_I, i_micro_kernel_config->vector_name, reg_X, reg_X, 16); /* shift FP32 by 16bit to right */ libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VPMOVDW, i_micro_kernel_config->vector_name, reg_X, 0 ); /* store 16 bit values into xmm portion of the register */ if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), ( ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX) ) ? 
'y' : 'z', 0, 2, 0, 1 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), ( ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CLX) ) ? 'x' : 'y', 0, 0, 0, 1 ); } } } /* clean stack and restore help5 */ libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 ); } else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && ((i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CPX) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX)) ) && ( (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { const unsigned int relu_bitmask_gpr = i_gp_reg_mapping->gp_reg_help_2; const unsigned int scratch_gpr = i_gp_reg_mapping->gp_reg_help_2; const unsigned int aux_gpr = i_gp_reg_mapping->gp_reg_help_1; const unsigned int zero_vreg = 1; const unsigned int aux_vreg = 2; /* storing downconverted and rounded C accumulator */ /* Check out if fusion has to be applied */ if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { libxsmm_generator_gemm_prepare_relu_fusion( io_generated_code, i_micro_kernel_config, zero_vreg, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr); } else if (i_micro_kernel_config->fused_sigmoid == 1) { /* First dump the accumulators to scratch 
and then setup sigmoid coeffcients to be reused */ libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( io_generated_code, i_micro_kernel_config_mod, l_vec_reg_acc_start, l_m_blocking, i_n_blocking, scratch_gpr, aux_gpr); } for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { unsigned int l_m_2_blocking = (l_m_blocking/2)*2; l_m = 0; if ( i_micro_kernel_config->use_masking_a_c != 0 ) { for ( l_m = 0 ; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8; libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config, zero_vreg, reg_X, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg); } else if (i_micro_kernel_config->fused_sigmoid == 1) { unsigned int tmp_vreg = 0; libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod, scratch_gpr, reg_X, tmp_vreg ); reg_X = tmp_vreg; } libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VCVTNEPS2BF16, i_micro_kernel_config->vector_name, reg_X, 0 ); /* store 16 bit values into ymm portion of the register bfloat mask fix can lead to errors x should not be masked */ if ( l_m == (l_m_blocking - 1) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 
'y' : 'z', 0, 2, 0, 1 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 'x' : 'y', 0, 0, 0, 1 ); } } } else { for (; l_m < l_m_2_blocking; l_m+=2 ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); unsigned int reg_X2 = l_vec_reg_acc_start + l_m+1 + (l_m_blocking * l_n); if (i_micro_kernel_config->fused_sigmoid == 1) { unsigned int tmp_vreg = 0; unsigned int tmp_vreg2 = 1; libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod, scratch_gpr, reg_X, tmp_vreg ); libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod, scratch_gpr, reg_X2, tmp_vreg2 ); reg_X = tmp_vreg; reg_X2 = tmp_vreg2; } libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, LIBXSMM_X86_INSTR_VCVTNE2PS2BF16, i_micro_kernel_config->vector_name, reg_X, reg_X2, 0 ); if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? 
((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8; libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config, zero_vreg, 0, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 0, aux_gpr, aux_vreg); } libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 'y' : 'z', 0, 0, 0, 1 ); } for (; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? ((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8; libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config, zero_vreg, reg_X, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg); } else if (i_micro_kernel_config->fused_sigmoid == 1) { unsigned int tmp_vreg = 0; libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod, scratch_gpr, reg_X, tmp_vreg ); reg_X = tmp_vreg; } libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VCVTNEPS2BF16, i_micro_kernel_config->vector_name, reg_X, 0 ); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), ( i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_VL256_CPX ) ? 
'x' : 'y', 0, 0, 0, 1 ); } } } if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { libxsmm_generator_gemm_cleanup_relu_fusion( io_generated_code, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr); } else if (i_micro_kernel_config->fused_sigmoid == 1) { libxsmm_generator_gemm_cleanup_sigmoid_fusion( io_generated_code, scratch_gpr, aux_gpr ); } } else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_VL256) ) && ( (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { /* pick the right instrucitons */ unsigned int inst_f32_i32 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VCVTPS2UDQ : LIBXSMM_X86_INSTR_VCVTPS2DQ; unsigned int inst_i32_i8 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VPMOVUSDB : LIBXSMM_X86_INSTR_VPMOVSDB; /* there are case where we need to load the scaling factor's address from the stack argument list */ if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) != 0 ) { libxsmm_x86_instruction_load_arg_to_reg( io_generated_code, 0, i_gp_reg_mapping->gp_reg_scf ); } /* loading scf into register 3 */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, i_gp_reg_mapping->gp_reg_scf, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, i_micro_kernel_config->vector_name, 3, 0, 1, 0 ); /* Zero out register 0 to perform relu */ libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, i_micro_kernel_config->vxor_instruction, i_micro_kernel_config->vector_name, 0, 0, 0); /* storing downconverted and rounded C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * 
l_n); /* Convert result to F32 */ libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, LIBXSMM_X86_INSTR_VCVTDQ2PS, i_micro_kernel_config->vector_name, reg_X, reg_X ); /* Multiply with scaling factor */ libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, LIBXSMM_X86_INSTR_VMULPS, i_micro_kernel_config->vector_name, reg_X, 3, reg_X ); /* Perform RELU */ libxsmm_x86_instruction_vec_compute_3reg( io_generated_code, LIBXSMM_X86_INSTR_VMAXPS, i_micro_kernel_config->vector_name, reg_X, 0, reg_X); /* Round result to int32 */ libxsmm_x86_instruction_vec_compute_2reg( io_generated_code, inst_f32_i32, i_micro_kernel_config->vector_name, reg_X, reg_X ); /* down-convert to int8 */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, inst_i32_i8, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), i_micro_kernel_config->vector_name, reg_X, ( ( l_m == (l_m_blocking - 1)) && ( i_micro_kernel_config->use_masking_a_c != 0 ) ) ? 
2 : 0, 0, 1 ); } } } else { /* storing C accumulator */ const unsigned int relu_bitmask_gpr = i_gp_reg_mapping->gp_reg_help_2; const unsigned int scratch_gpr = i_gp_reg_mapping->gp_reg_help_2; const unsigned int aux_gpr = i_gp_reg_mapping->gp_reg_help_1; const unsigned int zero_vreg = 0; const unsigned int aux_vreg = 1; /* Check out if fusion has to be applied */ if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { libxsmm_generator_gemm_prepare_relu_fusion( io_generated_code, i_micro_kernel_config, zero_vreg, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr); } else if (i_micro_kernel_config->fused_sigmoid == 1) { libxsmm_generator_gemm_dump_2D_block_and_prepare_sigmoid_fusion( io_generated_code, i_micro_kernel_config_mod, l_vec_reg_acc_start, l_m_blocking, i_n_blocking, scratch_gpr, aux_gpr); } for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { unsigned int bitmask_offset = (io_generated_code->arch < LIBXSMM_X86_AVX512) ? 
((l_n * i_xgemm_desc->ldc) + (l_m * 8))/8 : ((l_n * i_xgemm_desc->ldc) + (l_m * 16))/8; libxsmm_generator_gemm_apply_relu_to_vreg( io_generated_code, i_micro_kernel_config, zero_vreg, reg_X, i_micro_kernel_config->fused_relu, relu_bitmask_gpr, bitmask_offset, 1, aux_gpr, aux_vreg); } else if (i_micro_kernel_config->fused_sigmoid == 1) { unsigned int tmp_vreg = 0; libxsmm_generator_gemm_apply_sigmoid_to_vreg_from_scratch( io_generated_code, i_micro_kernel_config_mod, scratch_gpr, reg_X, tmp_vreg ); reg_X = tmp_vreg; } libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out), i_micro_kernel_config->vector_name, reg_X, ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 ); } if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) { if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) { /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */ unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size_out)); /* 64: hardcoded cache line length */ for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) { libxsmm_x86_instruction_prefetch( io_generated_code, i_micro_kernel_config->prefetch_instruction, i_gp_reg_mapping->gp_reg_b_prefetch, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size_out)); } } } } if ((i_micro_kernel_config->fused_relu_nobitmask == 1) || (i_micro_kernel_config->fused_relu == 1)) { libxsmm_generator_gemm_cleanup_relu_fusion( io_generated_code, 
                                             i_micro_kernel_config->fused_relu, relu_bitmask_gpr, aux_gpr );
    } else if (i_micro_kernel_config->fused_sigmoid == 1) {
      /* release the scratch space that was set up for the fused sigmoid */
      libxsmm_generator_gemm_cleanup_sigmoid_fusion( io_generated_code, scratch_gpr, aux_gpr );
    }
  }
}

/**
 * Emits the instructions that initialize the AVX-512 predicate mask register(s)
 * used for remainder handling in the M dimension.
 *
 * The immediate mask starts as an all-ones mask sized to the number of vector
 * lanes implied by the datatype and architecture (0xf: 4 x F64 on the 256-bit
 * AVX512-VL256 targets; 0xff: 8 x F64 on 512-bit, or 8 x F32 on 256-bit;
 * 0xffff: 16 lanes otherwise) and is then shifted right by i_mask_count.
 * NOTE(review): i_mask_count therefore appears to be the number of lanes to
 * mask OFF (the "inverse" remainder), not the remainder itself — confirm at
 * the call sites.
 *
 * The mask is materialized in the GP register i_gp_reg_tmp and moved into
 * the opmask register LIBXSMM_X86_AVX512_MASK via KMOVW; for BF16->BF16 and
 * I8->I8 kernels a second, wider copy (KMOVD / KMOVQ) is placed in opmask
 * register 2. Non-AVX512 targets are rejected with LIBXSMM_ERR_ARCH.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_initialize_avx512_mask( libxsmm_generated_code*        io_generated_code,
                                                    const unsigned int             i_gp_reg_tmp,
                                                    const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                    const unsigned int             i_mask_count ) {
  unsigned int l_mask;

  /* init full mask: pick the all-lanes-enabled immediate for this datatype/arch */
  if( ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype)) &&
      ((io_generated_code->arch == LIBXSMM_X86_AVX512_VL256 )||
       (io_generated_code->arch == LIBXSMM_X86_AVX512_VL256_CPX )||
       (io_generated_code->arch == LIBXSMM_X86_AVX512_VL256_CLX )) ){
    /* 4 F64 lanes in a 256-bit vector */
    l_mask = 0xf;
  } else if ( ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
      ((io_generated_code->arch == LIBXSMM_X86_AVX512_VL256 )||
       (io_generated_code->arch == LIBXSMM_X86_AVX512_VL256_CPX )||
       (io_generated_code->arch == LIBXSMM_X86_AVX512_VL256_CLX )) ){
    /* 8 lanes: F64 in 512-bit, or 32-bit types in 256-bit */
    l_mask = 0xff;
  } else {
    /* 16 lanes: 32-bit types in a 512-bit vector */
    l_mask = 0xffff;
  }
  /* shift right by the "inverse" remainder, disabling i_mask_count lanes */
  l_mask = l_mask >> i_mask_count;

  /* move mask to GP register */
  libxsmm_x86_instruction_alu_imm( io_generated_code,
                                   LIBXSMM_X86_INSTR_MOVQ,
                                   i_gp_reg_tmp,
                                   l_mask );

  if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512_VL256 ) &&
       ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) {
    /* primary remainder mask */
    libxsmm_x86_instruction_mask_move( io_generated_code,
                                       LIBXSMM_X86_INSTR_KMOVW_GPR_LD,
                                       i_gp_reg_tmp,
                                       LIBXSMM_X86_AVX512_MASK );
    if ( ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
         ( LIBXSMM_DATATYPE_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
      /* BF16 kernels additionally need a 32-bit-wide copy in opmask 2 */
      libxsmm_x86_instruction_mask_move( io_generated_code,
                                         LIBXSMM_X86_INSTR_KMOVD_GPR_LD,
                                         i_gp_reg_tmp,
                                         2 );
    } else if ( ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
                ( LIBXSMM_DATATYPE_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
      /* I8 kernels additionally need a 64-bit-wide copy in opmask 2 */
      libxsmm_x86_instruction_mask_move( io_generated_code,
                                         LIBXSMM_X86_INSTR_KMOVQ_GPR_LD,
                                         i_gp_reg_tmp,
                                         2 );
    } else {
      /* no additional mask is needed */
    }
  } else {
    /* shouldn't happen: this generator is only reached on AVX-512 targets */
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
    return;
  }
}
LAGraph_pagerankx4.c
//------------------------------------------------------------------------------ // LAGraph_pagerankx4: pagerank using a real semiring //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_pagerankx4: GAP-style PageRank, with import/export // Tim Davis and Mohsen Aznaveh. // See also LAGraph_pagerank3f, for the same computation without import/export. // This version is just slightly faster than LAGraph_pagerank3f (perhaps 10% // at most, sometimes the difference is smaller). // This algorithm follows the specification given in the GAP Benchmark Suite: // https://arxiv.org/abs/1508.03619 which assumes that both A and A' are // already available, as are the row and column degrees. 
// The GAP Benchmark algorithm assumes the graph has no nodes with no out-going
// edges (otherwise, a divide-by-zero occurs when dividing by d_out [i] below).
// In terms of the adjacency matrix, it assumes there are no rows in A that
// have no entries.

// For fastest results, the input matrix should be stored in GxB_BY_COL format.
// TODO: or use AT by row, since the GAP assumes both A and A' are available.

#define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING
#include "LAGraph.h"

#if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0)
// no need for vi and wi: v4+ supports dense ("full") import/export, which
// only moves the value arrays, not index arrays.
#define LAGRAPH_FREE_WORK           \
{                                   \
    LAGRAPH_FREE (vx) ;             \
    LAGRAPH_FREE (wx) ;             \
    LAGRAPH_FREE (prior) ;          \
    GrB_free (&v) ;                 \
    GrB_free (&w) ;                 \
}
#else
#define LAGRAPH_FREE_WORK           \
{                                   \
    LAGRAPH_FREE (vi) ;             \
    LAGRAPH_FREE (vx) ;             \
    LAGRAPH_FREE (wi) ;             \
    LAGRAPH_FREE (wx) ;             \
    LAGRAPH_FREE (prior) ;          \
    GrB_free (&v) ;                 \
    GrB_free (&w) ;                 \
}
#endif

#define LAGRAPH_FREE_ALL            \
{                                   \
    LAGRAPH_FREE_WORK ;             \
    GrB_free (result) ;             \
}

//------------------------------------------------------------------------------
// LAGraph_pagerankx4: GAP-style PageRank.
//
// Each iteration computes
//      new = teleport + damping * (A' * (old ./ d_out))
// using the PLUS_SECOND semiring (SECOND ignores the values of A, so only the
// pattern of A matters — A is treated as a binary graph), and stops when the
// 1-norm of (new - old) drops to tol, or after itermax iterations.
//
// The raw float arrays vx/wx are imported into GrB_Vectors before the mxv and
// exported back afterwards, so the O(n) scalar work can be done in plain
// OpenMP loops; note the export SWAPS the buffers (w's array becomes vx).
//------------------------------------------------------------------------------

GrB_Info LAGraph_pagerankx4         // PageRank definition
(
    GrB_Vector *result,     // output: array of LAGraph_PageRank structs
    GrB_Matrix A,           // binary input graph, not modified
    const float *LA_RESTRICT d_out, // out degree of each node (GrB_FP32, size n)
    float damping,          // damping factor (typically 0.85)
    int itermax,            // maximum number of iterations
    int *iters              // output: number of iterations taken
)
{

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    // info is used by the LAGr_* error-checking macros (defined in LAGraph.h),
    // which presumably free workspace via LAGRAPH_FREE_ALL on failure.
    GrB_Info info ;
    GrB_Index n ;
    GrB_Vector v = NULL, w = NULL ;
    #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0)
    // no need for vi and wi
    #else
    GrB_Index *vi = NULL, *wi = NULL ;
    #endif
    float *LA_RESTRICT vx = NULL ;      // current score array (size n)
    float *LA_RESTRICT wx = NULL ;      // workspace / next-score array (size n)
    float *LA_RESTRICT prior = NULL ;   // copy of scores from prior iteration
    GrB_Type type = GrB_FP32 ;
    (*result) = NULL ;

    LAGr_Matrix_nrows (&n, A) ;

    // transpose-first-input descriptor, so LAGr_mxv computes A'*v
    #if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
     && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
    GrB_Descriptor desc_t0 = GrB_DESC_T0 ;
    #else
    GrB_Descriptor desc_t0 = LAGraph_desc_tooo ;
    #endif

    const float teleport = (1 - damping) / n ;
    const float tol = 1e-4 ;
    float rdiff = 1 ;       // first iteration is always done

    int nthreads = LAGraph_get_nthreads ( ) ;
    nthreads = LAGRAPH_MIN (n, nthreads) ;
    nthreads = LAGRAPH_MAX (nthreads, 1) ;

    // allocate workspace
    vx = LAGraph_malloc (n, sizeof (float)) ;
    wx = LAGraph_malloc (n, sizeof (float)) ;
    #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0)
    // no need for vi and wi
    #else
    vi = LAGraph_malloc (n, sizeof (GrB_Index)) ;
    wi = LAGraph_malloc (n, sizeof (GrB_Index)) ;
    if (vi == NULL || wi == NULL)
    {
        LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
    }
    #endif
    prior = LAGraph_malloc (n, sizeof (float)) ;
    if (vx == NULL || prior == NULL || wx == NULL)
    {
        LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
    }

    // v = 1/n : uniform initial rank (vi/wi are the 0..n-1 index arrays
    // required by the pre-v4 import API)
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < n ; k++)
    {
        vx [k] = 1.0 / n ;
        #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0)
        // no need for vi and wi
        #else
        vi [k] = k ;
        wi [k] = k ;
        #endif
    }

    //--------------------------------------------------------------------------
    // pagerank iterations
    //--------------------------------------------------------------------------

    for ((*iters) = 0 ; (*iters) < itermax && rdiff > tol ; (*iters)++)
    {
        // prior = v ; v = damping * v ./ dout ; w (:) = teleport
        // (d_out [i] == 0 would divide by zero — excluded by the GAP spec)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t i = 0 ; i < n ; i++)
        {
            prior [i] = vx [i] ;
            vx [i] = damping * vx [i] / d_out [i] ;
            wx [i] = teleport ;
        }

        // import wx and wi into w
        // import vx and vi into v
        #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0)
        LAGr_Vector_import_Full (&w, type, n, (void **) (&wx), NULL) ;
        LAGr_Vector_import_Full (&v, type, n, (void **) (&vx), NULL) ;
        #else
        LAGr_Vector_import (&w, type, n, n, &wi, (void **) (&wx), NULL) ;
        LAGr_Vector_import (&v, type, n, n, &vi, (void **) (&vx), NULL) ;
        #endif

        // w += A'*v (PLUS_SECOND: only the pattern of A is used)
        LAGr_mxv (w, NULL, GrB_PLUS_FP32, GxB_PLUS_SECOND_FP32, A, v, desc_t0) ;

        // export w to vx and vi (the new score; note the swap)
        // export v to wx and wi (workspace for next iteration)
        #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0)
        LAGr_Vector_export_Full (&w, &type, &n, (void **) (&vx), NULL) ;
        LAGr_Vector_export_Full (&v, &type, &n, (void **) (&wx), NULL) ;
        #else
        LAGr_Vector_export (&w, &type, &n, &n, &vi, (void **) (&vx), NULL) ;
        LAGr_Vector_export (&v, &type, &n, &n, &wi, (void **) (&wx), NULL) ;
        #endif

        // check for convergence: rdiff = sum (|prior - vx|), the 1-norm
        rdiff = 0 ;
        #pragma omp parallel for num_threads(nthreads) schedule(static) \
            reduction(+:rdiff)
        for (int64_t i = 0 ; i < n ; i++)
        {
            rdiff += fabsf (prior [i] - vx [i]) ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    // hand ownership of the final score array vx to the result vector
    #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0)
    LAGr_Vector_import_Full (result, type, n, (void **) (&vx), NULL) ;
    #else
    LAGr_Vector_import (result, type, n, n, &vi, (void **) (&vx), NULL) ;
    #endif
    LAGRAPH_FREE_WORK ;
    return (GrB_SUCCESS) ;
}
core_dlaset.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlaset.c, normal z -> d, Fri Sep 28 17:38:22 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

// for memset function
#include <string.h>

/***************************************************************************//**
 *
 * @ingroup core_laset
 *
 *  Sets the diagonal elements of the matrix A to beta and all
 *  off-diagonal elements to alpha.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          Specifies which elements of the matrix are to be set
 *          - PlasmaUpper:   Upper part of A is set;
 *          - PlasmaLower:   Lower part of A is set;
 *          - PlasmaGeneral: ALL elements of A are set.
 *
 * @param[in] m
 *          The number of rows of the matrix A.  m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A.  n >= 0.
 *
 * @param[in] alpha
 *          The constant to which the off-diagonal elements are to be set.
 *
 * @param[in] beta
 *          The constant to which the diagonal elements are to be set.
 *
 * @param[in,out] A
 *          On entry, the m-by-n tile A.
 *          On exit, A has been set accordingly.
 *
 * @param[in] lda
 *          The leading dimension of the array A.  lda >= max(1,m).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_dlaset(plasma_enum_t uplo, int m, int n,
                        double alpha, double beta,
                        double *A, int lda)
{
    // The whole matrix becomes zero only if both constants are zero, every
    // element is addressed (PlasmaGeneral), and the column stride leaves no
    // gaps (m == lda), i.e. the storage is one contiguous run of m*n doubles.
    const int contiguous_zero_fill =
        alpha == 0.0 && beta == 0.0 && uplo == PlasmaGeneral && m == lda;

    if (contiguous_zero_fill) {
        // Fast path: zero the contiguous memory in one memset call.
        memset((void*)A, 0, (size_t)m*n*sizeof(double));
        return;
    }

    // General path: delegate to LAPACK's laset kernel.
    LAPACKE_dlaset_work(LAPACK_COL_MAJOR, lapack_const(uplo),
                        m, n, alpha, beta, A, lda);
}

/***************************************************************************//**
 *  OpenMP-task wrapper: schedules plasma_core_dlaset on the m-by-n submatrix
 *  starting at entry (i, j) of the mb-by-nb tile A (leading dimension mb).
 *  The task declares an output dependence on the whole tile.
 ******************************************************************************/
void plasma_core_omp_dlaset(plasma_enum_t uplo,
                            int mb, int nb,
                            int i, int j,
                            int m, int n,
                            double alpha, double beta,
                            double *A)
{
    #pragma omp task depend(out:A[0:mb*nb])
    {
        // Column-major offset of entry (i, j) within the tile.
        double *subtile = A + i + j*mb;
        plasma_core_dlaset(uplo, m, n, alpha, beta, subtile, mb);
    }
}
Jacobi1D-NaiveParallel-OMP_static.test.c
/****************************************************************************** * Jacobi1D benchmark * Basic parallelisation with OpenMP * * Usage: * make omp * export OMP_NUM_THREADS=8 * bin/Jacobi1D-NaiveParallel-OMP `cat src/$V.perfexecopts` * For a run on 8 threads ******************************************************************************/ #include <stdio.h> #include <omp.h> #include <time.h> #include <stdlib.h> #include <unistd.h> #include <getopt.h> #include <ctype.h> #include <stdbool.h> #include <assert.h> #include "util.h" // main // Given that this is a very straight forward benchark the code is almost // entirely kept within the main function. // The steps taken in this code are the following: // 1 - command line parsing // 2 - data allocation and initialization // 3 - jacobi 1D timed within an openmp loop // 4 - output and optional verification // int main( int argc, char* argv[] ){ // rather than calling fflush setbuf(stdout, NULL); // 1 - command line parsing Params cmdLineArgs; parseCmdLineArgs(&cmdLineArgs,argc,argv); // 2 - data allocation and initialization int lowerBound = 1; int upperBound = lowerBound + cmdLineArgs.problemSize - 1; double* space[2] = { NULL, NULL }; // allocate time-steps 0 and 1 space[0] = (double*) malloc( (cmdLineArgs.problemSize + 2) * sizeof(double)); space[1] = (double*) malloc( (cmdLineArgs.problemSize + 2) * sizeof(double)); if( space[0] == NULL || space[1] == NULL ){ printf( "Could not allocate space array\n" ); exit(0); } // use global seed to seed the random number gen (will be constant) srand(cmdLineArgs.globalSeed); // seed the space. 
int idx; // the randome number generator is not thread safe -- so first // set everything to 0 - respecting first touch #pragma omp parallel for private( idx ) schedule(static) for( idx = lowerBound-1; idx <= upperBound+1; ++idx ){ space[0][idx] = 0; } for( idx = lowerBound; idx <= upperBound; ++idx ){ space[0][idx] = rand() / (double)rand(); } // set halo values (sanity) space[0][0] = 0; space[0][upperBound+1] = 0; space[1][0] = 0; space[1][upperBound+1] = 0; // end allocate and initialize space // 3 - jacobi 1D timed within an openmp loop // Begin timed test int t, read = 0, write = 1; double start_time = omp_get_wtime(); for( t = 1; t <= cmdLineArgs.T; ++t ){ #pragma omp parallel for private( idx ) schedule(static) for( idx = lowerBound; idx <= upperBound; ++idx ){ space[write][idx] = (space[read][idx-1] + space[read][idx] + space[read][idx+1])/3; } read = write; write = 1 - write; } double end_time = omp_get_wtime(); double time = end_time - start_time; // End timed test // 4 - output and optional verification /* printf( "p: %d, T: %d, c:%d",cmdLineArgs.problemSize,cmdLineArgs.T, cmdLineArgs.cores); */ if( cmdLineArgs.printtime ){ printf( "Time: %f", time ); } if( cmdLineArgs.verify ){ if(!verifyResultJacobi1D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize, cmdLineArgs.globalSeed,cmdLineArgs.T )){ fprintf(stderr,"FAILURE\n"); }else{ fprintf(stderr,"SUCCESS\n"); } } return 0; }
GB_unop__identity_int16_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int16_uint32)
// op(A') function:  GB (_unop_tran__identity_int16_uint32)

// C type:   int16_t
// A type:   uint32_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
// NOTE(review): converting an out-of-range uint32_t to int16_t is
// implementation-defined in C; GraphBLAS relies on the usual wrap-around.
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise cast of the uint32_t array Ax into the int16_t array Cx,
// parallelized with a static OpenMP schedule over the anz entries.
GrB_Info GB (_unop_apply__identity_int16_uint32)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only positions with Ab [p] set need to be converted
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is shared across all generated unary ops: it is
// textually included from GB_unop_transpose.c and specialized by the GB_*
// macros defined above.
GrB_Info GB (_unop_tran__identity_int16_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
count_func.c
/******************************************************************************* * 2pt/count_func.c: this file is part of the FCFC program. * FCFC: Fast Correlation Function Calculator. * Github repository: https://github.com/cheng-zhao/FCFC * Copyright (c) 2020 -- 2021 Cheng Zhao <zhaocheng03@gmail.com> [MIT license] *******************************************************************************/ #include "define.h" #include "load_conf.h" #include "count_func.h" #include "kdtree.h" #include <string.h> #include <stdint.h> #include <stdlib.h> #include <math.h> #ifdef OMP #include <omp.h> #endif /*============================================================================*\ Data structure for storing dual nodes \*============================================================================*/ typedef struct { const void *a; const void *b; } DUAL_NODE; typedef struct { DUAL_NODE *nodes; size_t size; size_t capacity; } STACK_DUAL_NODE; /*============================================================================*\ Functions for stack manipulation \*============================================================================*/ /****************************************************************************** Function `stack_push`: Push an element to the stack for dual nodes. Arguments: * `s`: pointer to the stack; * `a`: the first node to be pushed to the stack; * `b`: the second node to be pushed to the stack. ******************************************************************************/ static void stack_push(STACK_DUAL_NODE *s, const void *a, const void *b) { if (s->size >= s->capacity) { /* Enlarge the memory allocated for the stack. 
*/ if (s->capacity) { if ((FCFC_STACK_MAX_SIZE >> 1) <= s->capacity) { P_EXT("too many elements to be pushed to the stack of dual nodes\n"); exit(FCFC_ERR_MEMORY); } s->capacity <<= 1; } else { /* initialise the stack */ s->nodes = NULL; s->capacity = FCFC_STACK_INIT_SIZE; } if (s->capacity <= s->size) { P_EXT("unable to expand the size of the stack of dual nodes\n"); exit(FCFC_ERR_UNKNOWN); } DUAL_NODE *tmp = realloc(s->nodes, s->capacity * sizeof *tmp); if (!tmp) { P_EXT("failed to allocate memory for the stack of dual nodes\n"); exit(FCFC_ERR_MEMORY); } s->nodes = tmp; } s->nodes[s->size].a = a; s->nodes[s->size++].b = b; } /****************************************************************************** Function `stack_pop`: Pop one pair of nodes from the stack for dual nodes. Arguments: * `s`: pointer to the stack. Return: Address of the dual nodes on the top of the stack on success; NULL on error. ******************************************************************************/ static inline DUAL_NODE *stack_pop(STACK_DUAL_NODE *s) { if (!s->size) return NULL; s->size -= 1; return s->nodes + s->size; } /****************************************************************************** Function `stack_destroy`: Deconstruct the stack for dual nodes. Arguments: * `s`: pointer to the stack. ******************************************************************************/ static void stack_destroy(STACK_DUAL_NODE *s) { if (!s->capacity) return; s->size = s->capacity = 0; if (s->nodes) free(s->nodes); } /*============================================================================*\ Functions for distance evaluations \*============================================================================*/ /****************************************************************************** Function `squared_distance`: Compute the squared Euclidean distance between two points in 3-D space. Arguments: * `a`: pointer to the first data point; * `b`: pointer to the second data point. 
Return: The squared Euclidean distance of the two points. ******************************************************************************/ static inline real squared_distance(const DATA *restrict a, const DATA *restrict b) { register real t = 2 * (a->x[0]*b->x[0] + a->x[1]*b->x[1] + a->x[2]*b->x[2]); return a->s + b->s - t; } /****************************************************************************** Function `squared_distance_alter`: Compute the squared Euclidean distance between two points in 3-D space. Arguments: * `a`: pointer to the first data point; * `b`: pointer to the second data point. Return: The squared Euclidean distance of the two points. ******************************************************************************/ static inline real squared_distance_alter(const DATA *restrict a, const DATA *restrict b) { register real dx = a->x[0] - b->x[0]; register real dy = a->x[1] - b->x[1]; register real dz = a->x[2] - b->x[2]; return dx * dx + dy * dy + dz * dz; } /****************************************************************************** Function `squared_distance_with_pi`: Compute the squared Euclidean distance between two points in 3-D space, and report also the squared radial distance. Arguments: * `a`: pointer to the first data point; * `b`: pointer to the second data point; * `pi`: the squared radial distance. Return: The squared Euclidean distance of the two points. ******************************************************************************/ static inline real squared_distance_with_pi(const DATA *restrict a, const DATA *restrict b, real *pi) { register real t = 2 * (a->x[0]*b->x[0] + a->x[1]*b->x[1] + a->x[2]*b->x[2]); register real s = a->s + b->s; register real d = a->s - b->s; *pi = d * d / (s + t); return s - t; } /****************************************************************************** Function `squared_distance_par_with_pi`: Compute the squared transverse and radial Euclidean distance between two points in 3-D. 
Arguments: * `a`: pointer to the first data point; * `b`: pointer to the second data point; * `pi`: the squared radial distance. Return: The squared transverse Euclidean distance of the two points. ******************************************************************************/ static inline real squared_distance_par_with_pi(const DATA *restrict a, const DATA *restrict b, real *pi) { register real t = 2 * (a->x[0]*b->x[0] + a->x[1]*b->x[1] + a->x[2]*b->x[2]); register real s = a->s + b->s; register real d = a->s - b->s; *pi = d * d / (s + t); return s - t - *pi; } /****************************************************************************** Function `min_squared_dist_between_box`: Compute the minimum squared distance between two boxes. Arguments: * `min1`: the lower corner of the first box; * `max1`: the upper corner of the first box; * `min2`: the lower corner of the second box; * `max2`: the upper corner of the second box. Return: The minimum squared distance between the two boxes. ******************************************************************************/ static inline real min_squared_dist_between_box(const DATA *restrict min1, const DATA *restrict max1, const DATA *restrict min2, const DATA *restrict max2) { register real sum = 0; register real d = (min1->x[0] < min2->x[0]) ? min2->x[0] - max1->x[0] : min1->x[0] - max2->x[0]; if (d > 0) sum += d * d; d = (min1->x[1] < min2->x[1]) ? min2->x[1] - max1->x[1] : min1->x[1] - max2->x[1]; if (d > 0) sum += d * d; d = (min1->x[2] < min2->x[2]) ? min2->x[2] - max1->x[2] : min1->x[2] - max2->x[2]; if (d > 0) sum += d * d; return sum; } /****************************************************************************** Function `max_squared_dist_between_box`: Compute the maximum squared distance between two boxes. Arguments: * `min1`: the lower corner of the first box; * `max1`: the upper corner of the first box; * `min2`: the lower corner of the second box; * `max2`: the upper corner of the second box. 
Return:
  The maximum squared distance between the two boxes.
******************************************************************************/
static inline real max_squared_dist_between_box(const DATA *restrict min1,
    const DATA *restrict max1, const DATA *restrict min2,
    const DATA *restrict max2) {
  real res = 0;
  for (int k = 0; k < 3; k++) {
    /* Pick the pair of opposite corners that are farthest apart along
     * this dimension, by comparing the box centres (scaled by 2). */
    real d = (min1->x[k] + max1->x[k] < min2->x[k] + max2->x[k]) ?
        max2->x[k] - min1->x[k] : max1->x[k] - min2->x[k];
    res += d * d;
  }
  return res;
}

/******************************************************************************
Function `find_dist_bin`:
  Find the index of a squared distance in the bins, using binary search.
Arguments:
  * `dist`:     the given distance;
  * `dbin`:     the array for distance bins;
  * `n`:        the number of distance bins.
Output:
  Index of the bin on success; SIZE_MAX if the bin is not found.
******************************************************************************/ static inline size_t find_dist_bin(const real dist, const real *restrict dbin, const int n) { size_t l, u; l = 0; u = n - 1; while (l <= u) { size_t i = (l + u) >> 1; if (dbin[i + 1] <= dist) l = i + 1; else if (dbin[i] > dist) u = i - 1; else return i; } return SIZE_MAX; } /*============================================================================*\ Pair counting functions from templates \*============================================================================*/ /* Clean all the relevant macros first */ #ifdef FCFC_TREE_TYPE #undef FCFC_TREE_TYPE #endif #ifdef FCFC_CNT_TYPE #undef FCFC_CNT_TYPE #endif #ifdef FCFC_BIN_TYPE #undef FCFC_BIN_TYPE #endif #ifdef FCFC_BIN_PREC #undef FCFC_BIN_PREC #endif #ifdef FCFC_BIN_SMIN #undef FCFC_BIN_SMIN #endif #ifdef FCFC_BIN_PMIN #undef FCFC_BIN_PMIN #endif #ifdef FCFC_CNT_WT #undef FCFC_CNT_WT #endif /******************************************************************************* k-D tree *******************************************************************************/ #define FCFC_TREE_TYPE FCFC_TREE_TYPE_KDTREE /** auto pair counts **/ /* kdtree_auto_iso_exact */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_exact_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_intbin */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO 
#define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_intbin_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_trunc */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_trunc_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_exact */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_exact_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_intbin */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_intbin_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN 
FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_trunc */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_trunc_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact_smin0_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define 
FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin_smin0_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc_smin0_pmin0 */ #define 
FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_exact_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_exact_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_intbin_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_intbin_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_trunc_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_iso_trunc_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT 
FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_exact_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_exact_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_intbin_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_intbin_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_trunc_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_smu_trunc_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN 
FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_exact_smin0_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_intbin_smin0_pmin0_wt */ #define FCFC_CNT_TYPE 
FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /* kdtree_auto_spi_trunc_smin0_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_AUTO #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #include "dual_tree.c" /** cross pair counts **/ /* kdtree_cross_iso_exact */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_exact_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define 
FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_intbin */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_intbin_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_trunc */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_trunc_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_exact */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_exact_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_intbin */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN 
FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_intbin_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_trunc */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_trunc_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_exact */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_exact_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_exact_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_exact_smin0_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define 
FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_intbin */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_intbin_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_intbin_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_intbin_smin0_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_trunc */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_trunc_smin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* 
kdtree_cross_spi_trunc_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_trunc_smin0_pmin0 */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_NO_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_iso_exact_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_exact_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_intbin_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_intbin_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_trunc_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define 
FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_iso_trunc_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_ISO #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_exact_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_exact_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_intbin_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_intbin_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_trunc_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_smu_trunc_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define 
FCFC_BIN_TYPE FCFC_BIN_SMU #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_exact_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_exact_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_exact_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_exact_smin0_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_EXACT #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_intbin_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_intbin_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO 
#include "dual_tree.c" /* kdtree_cross_spi_intbin_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_intbin_smin0_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_INTEG #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_trunc_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_trunc_smin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_NONZERO #include "dual_tree.c" /* kdtree_cross_spi_trunc_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_NONZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /* kdtree_cross_spi_trunc_smin0_pmin0_wt */ #define FCFC_CNT_TYPE FCFC_PAIR_COUNT_CROSS #define FCFC_BIN_TYPE FCFC_BIN_SPI #define FCFC_BIN_PREC FCFC_BIN_TRUNC #define FCFC_BIN_SMIN FCFC_BIN_MIN_ZERO #define FCFC_CNT_WT FCFC_CNT_WITH_WEIGHT #define FCFC_BIN_PMIN FCFC_BIN_MIN_ZERO #include "dual_tree.c" /*============================================================================*\ Interface for pair counting 
\*============================================================================*/

/******************************************************************************
Function `count_pairs`:
  Count pairs based on the k-D tree data structure.
  Selects, once per call, the pair-counting kernel specialised for the
  requested configuration (auto vs. cross, weighted vs. unweighted, binning
  scheme, bin precision, and whether the lowest s / pi bin edge is zero),
  then drives a dual-tree traversal with that kernel.
Arguments:
  * `tree1`:    pointer to the root of the first k-D tree;
  * `tree2`:    pointer to the root of the second k-D tree;
  * `cf`:       structure for configurations of correlation functions;
  * `cnt`:      array for storing pair counts;
  * `isauto`:   true for counting auto pairs;
  * `usewt`:    apply weights for pair counts.
******************************************************************************/
void count_pairs(const void *tree1, const void *tree2, CF *cf,
    pair_count_t *cnt, bool isauto, bool usewt) {
  /* Choose the optimal pair counting function.
     The kernels are generated by the repeated "dual_tree.c" inclusions above;
     each branch below picks the variant matching one combination of the
     template macros. */
  void (*pair_count_func) (STACK_DUAL_NODE *, const CF *, pair_count_t *)
      = NULL;
  /* smin0: the lowest separation bin edge is (numerically) zero. */
  bool smin0 = (cf->sbin[0] < REAL_TOL && cf->sbin[0] > -REAL_TOL);
  /* NOTE(review): if `REAL_NAN` is a floating-point NaN, the comparison
     `cf->prec == REAL_NAN` can never be true (NaN compares unequal to
     everything) and the EXACT branches would be unreachable — presumably
     `prec` and `REAL_NAN` are integer sentinels here; verify upstream. */
  if (isauto) {
    if (usewt) {
      if (cf->bintype == FCFC_BIN_ISO) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_auto_iso_exact_smin0_wt;
          else pair_count_func = kdtree_auto_iso_exact_wt;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_auto_iso_intbin_smin0_wt;
          else pair_count_func = kdtree_auto_iso_intbin_wt;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_auto_iso_trunc_smin0_wt;
          else pair_count_func = kdtree_auto_iso_trunc_wt;
        }
      }
      else if (cf->bintype == FCFC_BIN_SMU) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_auto_smu_exact_smin0_wt;
          else pair_count_func = kdtree_auto_smu_exact_wt;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_auto_smu_intbin_smin0_wt;
          else pair_count_func = kdtree_auto_smu_intbin_wt;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_auto_smu_trunc_smin0_wt;
          else pair_count_func = kdtree_auto_smu_trunc_wt;
        }
      }
      else {                                    /* FCFC_BIN_SPI */
        /* pmin0: the lowest pi bin edge is (numerically) zero. */
        bool pmin0 = (cf->pbin[0] < REAL_TOL && cf->pbin[0] > -REAL_TOL);
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_auto_spi_exact_smin0_pmin0_wt;
            else pair_count_func = kdtree_auto_spi_exact_smin0_wt;
          }
          else {
            if (pmin0) pair_count_func = kdtree_auto_spi_exact_pmin0_wt;
            else pair_count_func = kdtree_auto_spi_exact_wt;
          }
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_auto_spi_intbin_smin0_pmin0_wt;
            else pair_count_func = kdtree_auto_spi_intbin_smin0_wt;
          }
          else {
            if (pmin0) pair_count_func = kdtree_auto_spi_intbin_pmin0_wt;
            else pair_count_func = kdtree_auto_spi_intbin_wt;
          }
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_auto_spi_trunc_smin0_pmin0_wt;
            else pair_count_func = kdtree_auto_spi_trunc_smin0_wt;
          }
          else {
            if (pmin0) pair_count_func = kdtree_auto_spi_trunc_pmin0_wt;
            else pair_count_func = kdtree_auto_spi_trunc_wt;
          }
        }
      }
    }
    else {                                      /* !usewt */
      if (cf->bintype == FCFC_BIN_ISO) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_auto_iso_exact_smin0;
          else pair_count_func = kdtree_auto_iso_exact;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_auto_iso_intbin_smin0;
          else pair_count_func = kdtree_auto_iso_intbin;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_auto_iso_trunc_smin0;
          else pair_count_func = kdtree_auto_iso_trunc;
        }
      }
      else if (cf->bintype == FCFC_BIN_SMU) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_auto_smu_exact_smin0;
          else pair_count_func = kdtree_auto_smu_exact;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_auto_smu_intbin_smin0;
          else pair_count_func = kdtree_auto_smu_intbin;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_auto_smu_trunc_smin0;
          else pair_count_func = kdtree_auto_smu_trunc;
        }
      }
      else {                                    /* FCFC_BIN_SPI */
        bool pmin0 = (cf->pbin[0] < REAL_TOL && cf->pbin[0] > -REAL_TOL);
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_auto_spi_exact_smin0_pmin0;
            else pair_count_func = kdtree_auto_spi_exact_smin0;
          }
          else {
            if (pmin0) pair_count_func = kdtree_auto_spi_exact_pmin0;
            else pair_count_func = kdtree_auto_spi_exact;
          }
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_auto_spi_intbin_smin0_pmin0;
            else pair_count_func = kdtree_auto_spi_intbin_smin0;
          }
          else {
            if (pmin0) pair_count_func = kdtree_auto_spi_intbin_pmin0;
            else pair_count_func = kdtree_auto_spi_intbin;
          }
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_auto_spi_trunc_smin0_pmin0;
            else pair_count_func = kdtree_auto_spi_trunc_smin0;
          }
          else {
            if (pmin0) pair_count_func = kdtree_auto_spi_trunc_pmin0;
            else pair_count_func = kdtree_auto_spi_trunc;
          }
        }
      }
    }
  }
  else {                                        /* !isauto */
    if (usewt) {
      if (cf->bintype == FCFC_BIN_ISO) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_cross_iso_exact_smin0_wt;
          else pair_count_func = kdtree_cross_iso_exact_wt;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_cross_iso_intbin_smin0_wt;
          else pair_count_func = kdtree_cross_iso_intbin_wt;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_cross_iso_trunc_smin0_wt;
          else pair_count_func = kdtree_cross_iso_trunc_wt;
        }
      }
      else if (cf->bintype == FCFC_BIN_SMU) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_cross_smu_exact_smin0_wt;
          else pair_count_func = kdtree_cross_smu_exact_wt;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_cross_smu_intbin_smin0_wt;
          else pair_count_func = kdtree_cross_smu_intbin_wt;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_cross_smu_trunc_smin0_wt;
          else pair_count_func = kdtree_cross_smu_trunc_wt;
        }
      }
      else {                                    /* FCFC_BIN_SPI */
        bool pmin0 = (cf->pbin[0] < REAL_TOL && cf->pbin[0] > -REAL_TOL);
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_cross_spi_exact_smin0_pmin0_wt;
            else pair_count_func = kdtree_cross_spi_exact_smin0_wt;
          }
          else {
            if (pmin0) pair_count_func = kdtree_cross_spi_exact_pmin0_wt;
            else pair_count_func = kdtree_cross_spi_exact_wt;
          }
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_cross_spi_intbin_smin0_pmin0_wt;
            else pair_count_func = kdtree_cross_spi_intbin_smin0_wt;
          }
          else {
            if (pmin0) pair_count_func = kdtree_cross_spi_intbin_pmin0_wt;
            else pair_count_func = kdtree_cross_spi_intbin_wt;
          }
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_cross_spi_trunc_smin0_pmin0_wt;
            else pair_count_func = kdtree_cross_spi_trunc_smin0_wt;
          }
          else {
            if (pmin0) pair_count_func = kdtree_cross_spi_trunc_pmin0_wt;
            else pair_count_func = kdtree_cross_spi_trunc_wt;
          }
        }
      }
    }
    else {                                      /* !usewt */
      if (cf->bintype == FCFC_BIN_ISO) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_cross_iso_exact_smin0;
          else pair_count_func = kdtree_cross_iso_exact;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_cross_iso_intbin_smin0;
          else pair_count_func = kdtree_cross_iso_intbin;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_cross_iso_trunc_smin0;
          else pair_count_func = kdtree_cross_iso_trunc;
        }
      }
      else if (cf->bintype == FCFC_BIN_SMU) {
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) pair_count_func = kdtree_cross_smu_exact_smin0;
          else pair_count_func = kdtree_cross_smu_exact;
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) pair_count_func = kdtree_cross_smu_intbin_smin0;
          else pair_count_func = kdtree_cross_smu_intbin;
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) pair_count_func = kdtree_cross_smu_trunc_smin0;
          else pair_count_func = kdtree_cross_smu_trunc;
        }
      }
      else {                                    /* FCFC_BIN_SPI */
        bool pmin0 = (cf->pbin[0] < REAL_TOL && cf->pbin[0] > -REAL_TOL);
        if (cf->prec == REAL_NAN) {             /* FCFC_BIN_EXACT */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_cross_spi_exact_smin0_pmin0;
            else pair_count_func = kdtree_cross_spi_exact_smin0;
          }
          else {
            if (pmin0) pair_count_func = kdtree_cross_spi_exact_pmin0;
            else pair_count_func = kdtree_cross_spi_exact;
          }
        }
        else if (cf->prec == 1) {               /* FCFC_BIN_INTEG */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_cross_spi_intbin_smin0_pmin0;
            else pair_count_func = kdtree_cross_spi_intbin_smin0;
          }
          else {
            if (pmin0) pair_count_func = kdtree_cross_spi_intbin_pmin0;
            else pair_count_func = kdtree_cross_spi_intbin;
          }
        }
        else {                                  /* FCFC_BIN_TRUNC */
          if (smin0) {
            if (pmin0) pair_count_func = kdtree_cross_spi_trunc_smin0_pmin0;
            else pair_count_func = kdtree_cross_spi_trunc_smin0;
          }
          else {
            if (pmin0) pair_count_func = kdtree_cross_spi_trunc_pmin0;
            else pair_count_func = kdtree_cross_spi_trunc;
          }
        }
      }
    }
  }

  /* Initialise the stack for dual nodes. */
  STACK_DUAL_NODE stack;
  stack.size = stack.capacity = 0;
  stack_push(&stack, tree1, tree2);

#ifdef OMP
  /* Assign tasks to different OpenMP threads.
     First expand the stack serially, breadth-first, until there are enough
     dual nodes to keep every thread busy. */
  size_t size = stack.size;     /* for visiting all nodes at the same level */
  while (stack.size != size ||
      stack.size < (size_t) cf->nthread * FCFC_STACK_SIZE_PER_THREAD) {
    pair_count_func(&stack, cf, cnt);
    if (!stack.size) return;    /* all pairs have been recorded */
    if (size > 1) {
      size -= 1;
      /* reorder dual nodes, to ensure nodes at the same level are visited */
      DUAL_NODE tmp = stack.nodes[stack.size - 1];
      stack.nodes[stack.size - 1] = stack.nodes[size - 1];
      stack.nodes[size - 1] = tmp;
    }
    else size = stack.size;
  }
  /* Clean the array for storing thread-private pair counts. */
  memset(cf->pcnt, 0, sizeof(pair_count_t) * cf->ntot * cf->nthread);
#pragma omp parallel
  {
    STACK_DUAL_NODE ps;         /* thread-private stack */
    ps.size = ps.capacity = 0;
    const int tid = omp_get_thread_num();
#pragma omp for schedule(dynamic)
    for (size_t i = 0; i < stack.size; i++) {
      /* Each top-level dual node seeds an independent sub-traversal. */
      stack_push(&ps, stack.nodes[i].a, stack.nodes[i].b);
      while (ps.size) pair_count_func(&ps, cf, cf->pcnt + tid * cf->ntot);
    }
    stack_destroy(&ps);
  }
  /* Gather pair counts from threads. */
  if (usewt) {
    /* Weighted counts are accumulated in the `.d` (floating-point) field. */
#pragma omp parallel for
    for (size_t i = 0; i < cf->ntot; i++) {
      for (int j = 0; j < cf->nthread; j++)
        cnt[i].d += cf->pcnt[i + j * cf->ntot].d;
    }
  }
  else {
    /* Unweighted counts are accumulated in the `.i` (integer) field. */
    for (size_t i = 0; i < cf->ntot; i++) {
      for (int j = 0; j < cf->nthread; j++)
        cnt[i].i += cf->pcnt[i + j * cf->ntot].i;
    }
  }
#else
  /* Serial build: drain the dual-node stack directly into `cnt`. */
  while (stack.size) pair_count_func(&stack, cf, cnt);
#endif

  stack_destroy(&stack);
}
GB_unop__identity_fc32_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_uint16)
// op(A') function:  GB (_unop_tran__identity_fc32_uint16)

// C type:   GxB_FC32_t
// A type:   uint16_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: widen the uint16 entry to a single-precision complex value with
// zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator entrywise: Cx [p] = (GxB_FC32_t) Ax [p],
// either densely (Ab == NULL) or restricted to the entries present in the
// bitmap Ab.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_unop_apply__identity_fc32_uint16)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense / sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose logic lives in the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int16_uint64
// op(A') function:  GB_tran__identity_int16_uint64

// C type:   int16_t
// A type:   uint64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: narrowing uint64 -> int16 conversion; out-of-range values are
// reduced per the C implementation-defined signed conversion (typically
// modulo wrap) -- intentional for the typecasting IDENTITY operator
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise: Cx [p] = (int16_t) Ax [p] for all anz entries, parallelised with
// a static OpenMP schedule.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_unop__identity_int16_uint64
(
    int16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose logic lives in the shared template GB_unaryop_transpose.c
// (phase 2), expanded with the GB_* macros defined above.
GrB_Info GB_tran__identity_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
wino_conv_kernel_1_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: zhli@openailab.com */ #ifdef __aarch64__ #include "wino_conv_kernel_1_arm.h" #include "api/c_api.h" #include "utility/sys_port.h" #include <math.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <arm_neon.h> #define TILE 4 #define BLOCK_HW_UNIT 4 #define ELEM_SIZE ((TILE + 2) * (TILE + 2)) #define WINO_MAX(a, b) ((a) > (b) ? (a) : (b)) #define WINO_MIN(a, b) ((a) < (b) ? 
(a) : (b)) #define PER_OUT_CHAN 16 #define KER_COUT_UNIT 16 #define KER_COUT_UNIT4 4 void tran_inp_4(float*, float*, float*, int, int, int); void wino_sgemm_4x16_A72(float* output, const float* input, const float* kernel, long cin, short stride_save); void wino_sgemm_4x4_A72(float* output, const float* input, const float* kernel, long cin, short stride_save); void wino_sgemm_1x16(float* output, const float* input, const float* kernel, long cin); void wino_sgemm_1x4(float* output, const float* input, const float* kernel, long cin); void tran_out_4(float*, float*, int, float*, float*, int); #define INTERLEAVE_KERNEL_UNIT(cout_idx_p, cout_unit, cin, ker_src, ker_dst, ELEM_SIZE, i, j, s) \ { \ for (i = 0; i < cin; i++) \ { \ for (j = 0; j < cout_unit; j++) \ { \ *ker_dst = ker_src[((cout_idx_p + j) * cin + i) * ELEM_SIZE + s]; \ ker_dst++; \ } \ } \ } static inline void trans_kernel_f43(float* ker, float* trans_ker) { /* float G[18]={ 1./4 , 0. , 0. , -1./6 , -1./6 , -1./6 , -1./6 , 1./6 , -1./6 , 1./24 , 1./12 , 1./6 , 1./24 , -1./12 , 1./6 , 0. , 0. , 1. }; float GT[18]={ 1./4 , -1./6, -1./6 , 1./24, 1./24 , 0., 0., -1./6, 1./6 , 1./12, -1./12 , 0., 0., -1./6, -1./6 , 1./6, 1./6 , 1. }; */ float tmp[18] = {0}; float neg_r0_add_r2_x_1_6[6]; // (r0+r2)*1./6 float r0_1_4_add_r2_x_1_6[6]; // (r0*1/4 + r2)*1./6 float r1_1_6[6]; // r1*1/6 float r1_1_12[6]; // r1*1/12 float s_1_6 = 1. 
/ 6.f; for (int j = 0; j < 3; j++) { neg_r0_add_r2_x_1_6[j] = -(ker[j] + ker[6 + j]) * s_1_6; r0_1_4_add_r2_x_1_6[j] = (ker[j] * 0.25 + ker[6 + j]) * s_1_6; r1_1_6[j] = ker[3 + j] * s_1_6; r1_1_12[j] = r1_1_6[j] * 0.5; } for (int j = 0; j < 3; j++) { tmp[j] = ker[j] * 0.25; tmp[3 + j] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; tmp[6 + j] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; tmp[9 + j] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; tmp[12 + j] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; tmp[15 + j] = ker[6 + j]; } // gemm(6,3,3,G,ker,tmp); done int idx; for (int j = 0; j < 6; j++) { idx = j * 3; neg_r0_add_r2_x_1_6[j] = -(tmp[idx] + tmp[idx + 2]) * s_1_6; r0_1_4_add_r2_x_1_6[j] = (tmp[idx] * 0.25 + tmp[idx + 2]) * s_1_6; r1_1_6[j] = tmp[idx + 1] * s_1_6; r1_1_12[j] = r1_1_6[j] * 0.5; } for (int j = 0; j < 6; j++) { idx = j * 6; trans_ker[idx] = tmp[j * 3] * 0.25; trans_ker[idx + 1] = -r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; trans_ker[idx + 2] = r1_1_6[j] + neg_r0_add_r2_x_1_6[j]; trans_ker[idx + 3] = r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; trans_ker[idx + 4] = -r1_1_12[j] + r0_1_4_add_r2_x_1_6[j]; trans_ker[idx + 5] = tmp[j * 3 + 2]; } // gemm(6,6,3,tmp,GT,trans_ker); done } static inline void transform_kernel_f43_tile(struct tensor* filter, float* trans_ker) { int outc = filter->dims[0]; int inc = filter->dims[1]; float* kernel = (float*)filter->data; float* ker_ptr = trans_ker; for (int i = 0; i < outc; i++) { for (int j = 0; j < inc; j++) { trans_kernel_f43((float*)(kernel + 9 * (j + i * inc)), ker_ptr); ker_ptr += ELEM_SIZE; } } } // ker0 [cout][cin][ELEM_SIZE] // ker1 [ELEM_SIZE][cout//KER_COUT_UNIT][cin][KER_COUT_UNIT] static inline void interleave_kernel_1(float* ker0, float* ker1, int cout, int cin) { int i, j; float* ker1_ptr = ker1; for (int s = 0; s < ELEM_SIZE; s++) { int p; //cout 16 for (p = 0; p < (cout & -KER_COUT_UNIT); p += KER_COUT_UNIT) { INTERLEAVE_KERNEL_UNIT(p, KER_COUT_UNIT, cin, ker0, ker1_ptr, ELEM_SIZE, i, j, s); } //cout 4 for (p = (cout & -KER_COUT_UNIT); p < 
(cout & -KER_COUT_UNIT4); p += KER_COUT_UNIT4) { INTERLEAVE_KERNEL_UNIT(p, KER_COUT_UNIT4, cin, ker0, ker1_ptr, ELEM_SIZE, i, j, s); } // cout 1 for (p = (cout & -KER_COUT_UNIT4); p < cout; p++) { INTERLEAVE_KERNEL_UNIT(p, 1, cin, ker0, ker1_ptr, ELEM_SIZE, i, j, s); } } } static inline void pad_input1(const float* input, float* inp_padded, int inc, int inh, int inw, int padded_h, int padded_w, int pad0, int pad1) { int padded_hw = padded_h * padded_w; float* pad_ptr; float* inp_ptr = (float*)input; int resi_h = padded_h - pad0 - inh; int resi_w = padded_w - pad1 - inw; for (int c = 0; c < inc; c++) { pad_ptr = inp_padded + c * padded_hw; // pad h_top memset(pad_ptr, 0, padded_w * pad0 * sizeof(float)); pad_ptr += pad0 * padded_w; // pad h_mid for (int h = 0; h < inh; h++) { // pad w_left memset(pad_ptr, 0, pad1 * sizeof(float)); // pad w_mid memcpy(pad_ptr + pad1, inp_ptr, inw * sizeof(float)); // pad w_end memset(pad_ptr + pad1 + inw, 0, resi_w * sizeof(float)); inp_ptr += inw; pad_ptr += padded_w; } // pad h_bottom memset(pad_ptr, 0, padded_w * resi_h * sizeof(float)); } } static inline void trans_inp_1tile(float* input, float* inp_ptr, int ih, int jw, int c, int in_hw, int inw) { float* inp = (float*)input + c * in_hw + ih * 4 * inw + jw * 4; float* inp0 = inp; float* inp1 = inp0 + inw; float* inp2 = inp1 + inw; float* inp3 = inp2 + inw; float* inp4 = inp3 + inw; float* inp5 = inp4 + inw; float tmp[36] = {0}; float r1_add_r2[6]; float r3_add_r4[6]; float r1_minus_r2[6]; float r3_minus_r4[6]; float r4_minus_r2[6]; float r1_minus_r3[6]; for (int j = 0; j < 6; j++) { r1_add_r2[j] = inp1[j] + inp2[j]; r1_minus_r2[j] = inp1[j] - inp2[j]; r3_add_r4[j] = inp3[j] + inp4[j]; r3_minus_r4[j] = inp3[j] - inp4[j]; r4_minus_r2[j] = inp4[j] - inp2[j]; r1_minus_r3[j] = inp1[j] - inp3[j]; } for (int j = 0; j < 6; j++) { tmp[j] = 4 * inp0[j] - 5 * inp2[j] + inp4[j]; tmp[6 + j] = r3_add_r4[j] - 4 * r1_add_r2[j]; tmp[12 + j] = 4 * r1_minus_r2[j] - r3_minus_r4[j]; tmp[18 + j] = 
r4_minus_r2[j] - 2 * r1_minus_r3[j]; tmp[24 + j] = r4_minus_r2[j] + 2 * r1_minus_r3[j]; tmp[30 + j] = 4 * inp1[j] - 5 * inp3[j] + inp5[j]; } float r1_4_minus_r3[6]; float r4_minus_4_r2[6]; float r4_minus_r2_[6]; float r1_minus_r3_x2[6]; for (int j = 0; j < 6; j++) { r4_minus_r2_[j] = tmp[j * 6 + 4] - tmp[j * 6 + 2]; r1_4_minus_r3[j] = 4 * tmp[j * 6 + 1] - tmp[j * 6 + 3]; r4_minus_4_r2[j] = tmp[j * 6 + 4] - 4 * tmp[j * 6 + 2]; r1_minus_r3_x2[j] = 2 * (tmp[j * 6 + 1] - tmp[j * 6 + 3]); } for (int j = 0; j < 6; j++) { inp_ptr[j * 6] = 4 * tmp[j * 6] - 5 * tmp[j * 6 + 2] + tmp[j * 6 + 4]; inp_ptr[1 + j * 6] = r4_minus_4_r2[j] - r1_4_minus_r3[j]; inp_ptr[2 + j * 6] = r4_minus_4_r2[j] + r1_4_minus_r3[j]; inp_ptr[3 + j * 6] = r4_minus_r2_[j] - r1_minus_r3_x2[j]; inp_ptr[4 + j * 6] = r4_minus_r2_[j] + r1_minus_r3_x2[j]; inp_ptr[5 + j * 6] = 4 * tmp[j * 6 + 1] - 5 * tmp[j * 6 + 3] + tmp[j * 6 + 5]; } } static inline void trans_inp_4_cpu(float* inp, float* inp_ptr, int inw, int s_size) { float* inp0 = inp; float* inp1 = inp0 + inw; float* inp2 = inp1 + inw; float* inp3 = inp2 + inw; float* inp4 = inp3 + inw; float* inp5 = inp4 + inw; float mid[36 * 4] = {0}; float r4_minus_r2[24]; float r1_4_minus_r3[24]; float r4_minus_4_r2[24]; float r1_minus_r3_x2[24]; for (int i = 0; i < 6; i++) { // 0 mid[i * 4] = 4 * inp0[i] - 5 * inp2[i] + inp4[i]; mid[(30 + i) * 4] = 4 * inp1[i] - 5 * inp3[i] + inp5[i]; r1_minus_r3_x2[i * 4 + 0] = (inp1[i] - inp3[i]) * 2; r1_4_minus_r3[i * 4 + 0] = 4 * inp1[i] - inp3[i]; r4_minus_4_r2[i * 4 + 0] = inp4[i] - 4 * inp2[i]; r4_minus_r2[i * 4 + 0] = inp4[i] - inp2[i]; // 1 mid[i * 4 + 1] = 4 * inp0[i + 4] - 5 * inp2[i + 4] + inp4[i + 4]; mid[(30 + i) * 4 + 1] = 4 * inp1[i + 4] - 5 * inp3[i + 4] + inp5[i + 4]; r1_minus_r3_x2[i * 4 + 1] = (inp1[i + 4] - inp3[i + 4]) * 2; r1_4_minus_r3[i * 4 + 1] = 4 * inp1[i + 4] - inp3[i + 4]; r4_minus_4_r2[i * 4 + 1] = inp4[i + 4] - 4 * inp2[i + 4]; r4_minus_r2[i * 4 + 1] = inp4[i + 4] - inp2[i + 4]; // 2 mid[i * 4 + 2] = 
4 * inp0[i + 8] - 5 * inp2[i + 8] + inp4[i + 8]; mid[(30 + i) * 4 + 2] = 4 * inp1[i + 8] - 5 * inp3[i + 8] + inp5[i + 8]; r1_minus_r3_x2[i * 4 + 2] = (inp1[i + 8] - inp3[i + 8]) * 2; r1_4_minus_r3[i * 4 + 2] = 4 * inp1[i + 8] - inp3[i + 8]; r4_minus_4_r2[i * 4 + 2] = inp4[i + 8] - 4 * inp2[i + 8]; r4_minus_r2[i * 4 + 2] = inp4[i + 8] - inp2[i + 8]; // 3 mid[i * 4 + 3] = 4 * inp0[i + 12] - 5 * inp2[i + 12] + inp4[i + 12]; mid[(30 + i) * 4 + 3] = 4 * inp1[i + 12] - 5 * inp3[i + 12] + inp5[i + 12]; r1_minus_r3_x2[i * 4 + 3] = (inp1[i + 12] - inp3[i + 12]) * 2; r1_4_minus_r3[i * 4 + 3] = 4 * inp1[i + 12] - inp3[i + 12]; r4_minus_4_r2[i * 4 + 3] = inp4[i + 12] - 4 * inp2[i + 12]; r4_minus_r2[i * 4 + 3] = inp4[i + 12] - inp2[i + 12]; } //==================================================================== // for(int i = 0; i < 6; i++) // { // for(int k = 0; k < 4; k++) // { // mid[(6 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k]; // mid[(12 + i) * 4 + k] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k]; // mid[(18 + i) * 4 + k] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k]; // mid[(24 + i) * 4 + k] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k]; // } // } float32x4_t r0 = vld1q_f32(r4_minus_4_r2); float32x4_t r1 = vld1q_f32(r4_minus_4_r2 + 4); float32x4_t r2 = vld1q_f32(r4_minus_4_r2 + 8); float32x4_t r3 = vld1q_f32(r4_minus_4_r2 + 12); float32x4_t r4 = vld1q_f32(r4_minus_4_r2 + 16); float32x4_t r5 = vld1q_f32(r4_minus_4_r2 + 20); float32x4_t r0_ = vld1q_f32(r1_4_minus_r3); float32x4_t r1_ = vld1q_f32(r1_4_minus_r3 + 4); float32x4_t r2_ = vld1q_f32(r1_4_minus_r3 + 8); float32x4_t r3_ = vld1q_f32(r1_4_minus_r3 + 12); float32x4_t r4_ = vld1q_f32(r1_4_minus_r3 + 16); float32x4_t r5_ = vld1q_f32(r1_4_minus_r3 + 20); float32x4_t line0_0 = vld1q_f32(mid); float32x4_t line0_1 = vld1q_f32(mid + 4); float32x4_t line0_2 = vld1q_f32(mid + 8); float32x4_t line0_3 = vld1q_f32(mid + 12); float32x4_t line0_4 = vld1q_f32(mid + 16); float32x4_t 
line0_5 = vld1q_f32(mid + 20); float32x4_t line1_0 = vsubq_f32(r0, r0_); // mid[(6 + i) * 4 + k] [1][0] float32x4_t line1_1 = vsubq_f32(r1, r1_); // mid[(6 + i) * 4 + k] [1][1] float32x4_t line1_2 = vsubq_f32(r2, r2_); // mid[(6 + i) * 4 + k] [1][2] float32x4_t line1_3 = vsubq_f32(r3, r3_); // mid[(6 + i) * 4 + k] [1][3] float32x4_t line1_4 = vsubq_f32(r4, r4_); // mid[(6 + i) * 4 + k] [1][4] float32x4_t line1_5 = vsubq_f32(r5, r5_); // mid[(6 + i) * 4 + k] [1][5] float32x4_t line2_0 = vaddq_f32(r0, r0_); // mid[(12 + i) * 4 + k] [2][0] float32x4_t line2_1 = vaddq_f32(r1, r1_); // mid[(12 + i) * 4 + k] [2][1] float32x4_t line2_2 = vaddq_f32(r2, r2_); // mid[(12 + i) * 4 + k] [2][2] float32x4_t line2_3 = vaddq_f32(r3, r3_); // mid[(12 + i) * 4 + k] [2][3] float32x4_t line2_4 = vaddq_f32(r4, r4_); // mid[(12 + i) * 4 + k] [2][4] float32x4_t line2_5 = vaddq_f32(r5, r5_); // mid[(12 + i) * 4 + k] [2][5] r0 = vld1q_f32(r4_minus_r2); r1 = vld1q_f32(r4_minus_r2 + 4); r2 = vld1q_f32(r4_minus_r2 + 8); r3 = vld1q_f32(r4_minus_r2 + 12); r4 = vld1q_f32(r4_minus_r2 + 16); r5 = vld1q_f32(r4_minus_r2 + 20); r0_ = vld1q_f32(r1_minus_r3_x2); r1_ = vld1q_f32(r1_minus_r3_x2 + 4); r2_ = vld1q_f32(r1_minus_r3_x2 + 8); r3_ = vld1q_f32(r1_minus_r3_x2 + 12); r4_ = vld1q_f32(r1_minus_r3_x2 + 16); r5_ = vld1q_f32(r1_minus_r3_x2 + 20); float32x4_t line5_0 = vld1q_f32(mid + 120); float32x4_t line5_1 = vld1q_f32(mid + 124); float32x4_t line5_2 = vld1q_f32(mid + 128); float32x4_t line5_3 = vld1q_f32(mid + 132); float32x4_t line5_4 = vld1q_f32(mid + 136); float32x4_t line5_5 = vld1q_f32(mid + 140); float32x4_t line3_0 = vsubq_f32(r0, r0_); // mid[(18 + i) * 4 + k] [3][0] float32x4_t line3_1 = vsubq_f32(r1, r1_); // mid[(18 + i) * 4 + k] [3][1] float32x4_t line3_2 = vsubq_f32(r2, r2_); // mid[(18 + i) * 4 + k] [3][2] float32x4_t line3_3 = vsubq_f32(r3, r3_); // mid[(18 + i) * 4 + k] [3][3] float32x4_t line3_4 = vsubq_f32(r4, r4_); // mid[(18 + i) * 4 + k] [3][4] float32x4_t line3_5 = 
vsubq_f32(r5, r5_); // mid[(18 + i) * 4 + k] [3][5] float32x4_t line4_0 = vaddq_f32(r0, r0_); // mid[(24 + i) * 4 + k] [4][0] float32x4_t line4_1 = vaddq_f32(r1, r1_); // mid[(24 + i) * 4 + k] [4][1] float32x4_t line4_2 = vaddq_f32(r2, r2_); // mid[(24 + i) * 4 + k] [4][2] float32x4_t line4_3 = vaddq_f32(r3, r3_); // mid[(24 + i) * 4 + k] [4][3] float32x4_t line4_4 = vaddq_f32(r4, r4_); // mid[(24 + i) * 4 + k] [4][4] float32x4_t line4_5 = vaddq_f32(r5, r5_); // mid[(24 + i) * 4 + k] [4][5] // r4_minus_r2[i * 4 + k] i=0 = mid[0][4] r0 = vsubq_f32(line0_4, line0_2); r1 = vsubq_f32(line1_4, line1_2); r2 = vsubq_f32(line2_4, line2_2); r3 = vsubq_f32(line3_4, line3_2); r4 = vsubq_f32(line4_4, line4_2); r5 = vsubq_f32(line5_4, line5_2); r0_ = vsubq_f32(line0_1, line0_3); r1_ = vsubq_f32(line1_1, line1_3); r2_ = vsubq_f32(line2_1, line2_3); r3_ = vsubq_f32(line3_1, line3_3); r4_ = vsubq_f32(line4_1, line4_3); r5_ = vsubq_f32(line5_1, line5_3); float32x4_t const2 = vdupq_n_f32(2.f); r0_ = vmulq_f32(r0_, const2); r1_ = vmulq_f32(r1_, const2); r2_ = vmulq_f32(r2_, const2); r3_ = vmulq_f32(r3_, const2); r4_ = vmulq_f32(r4_, const2); r5_ = vmulq_f32(r5_, const2); vst1q_f32(inp_ptr + s_size * 3, vsubq_f32(r0, r0_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 9, vsubq_f32(r1, r1_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 15, vsubq_f32(r2, r2_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 21, vsubq_f32(r3, r3_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 27, vsubq_f32(r4, r4_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 33, vsubq_f32(r5, r5_)); // inp_ptr[ s_size * (3 + i * 6)] vst1q_f32(inp_ptr + s_size * 4, vaddq_f32(r0, r0_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 10, vaddq_f32(r1, r1_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 16, vaddq_f32(r2, r2_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 22, 
vaddq_f32(r3, r3_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 28, vaddq_f32(r4, r4_)); // inp_ptr[ s_size * (4 + i * 6)] vst1q_f32(inp_ptr + s_size * 34, vaddq_f32(r5, r5_)); // inp_ptr[ s_size * (4 + i * 6)] float32x4_t const4 = vdupq_n_f32(4.f); float32x4_t const5 = vdupq_n_f32(-5.f); r0_ = vmulq_f32(line0_1, const4); // line 1*4 ======== r1_ = vmulq_f32(line1_1, const4); r2_ = vmulq_f32(line2_1, const4); r3_ = vmulq_f32(line3_1, const4); r4_ = vmulq_f32(line4_1, const4); r5_ = vmulq_f32(line5_1, const4); float32x4_t rr0_ = vsubq_f32(r0_, line0_3); // line1*4-line3 float32x4_t rr1_ = vsubq_f32(r1_, line1_3); float32x4_t rr2_ = vsubq_f32(r2_, line2_3); float32x4_t rr3_ = vsubq_f32(r3_, line3_3); float32x4_t rr4_ = vsubq_f32(r4_, line4_3); float32x4_t rr5_ = vsubq_f32(r5_, line5_3); r0 = vmulq_f32(line0_2, const4); r1 = vmulq_f32(line1_2, const4); r2 = vmulq_f32(line2_2, const4); r3 = vmulq_f32(line3_2, const4); r4 = vmulq_f32(line4_2, const4); r5 = vmulq_f32(line5_2, const4); r0 = vsubq_f32(line0_4, r0); // line4 -4*line2 r1 = vsubq_f32(line1_4, r1); r2 = vsubq_f32(line2_4, r2); r3 = vsubq_f32(line3_4, r3); r4 = vsubq_f32(line4_4, r4); r5 = vsubq_f32(line5_4, r5); vst1q_f32(inp_ptr + s_size * 1, vsubq_f32(r0, rr0_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 7, vsubq_f32(r1, rr1_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 13, vsubq_f32(r2, rr2_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 19, vsubq_f32(r3, rr3_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 25, vsubq_f32(r4, rr4_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 31, vsubq_f32(r5, rr5_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 2, vaddq_f32(r0, rr0_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 8, vaddq_f32(r1, rr1_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 14, vaddq_f32(r2, rr2_)); // inp_ptr[ s_size * (2 + i * 
6)] vst1q_f32(inp_ptr + s_size * 20, vaddq_f32(r3, rr3_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 26, vaddq_f32(r4, rr4_)); // inp_ptr[ s_size * (2 + i * 6)] vst1q_f32(inp_ptr + s_size * 32, vaddq_f32(r5, rr5_)); // inp_ptr[ s_size * (2 + i * 6)] r0_ = vaddq_f32(line0_5, r0_); // 5 + 1*4 r1_ = vaddq_f32(line1_5, r1_); r2_ = vaddq_f32(line2_5, r2_); r3_ = vaddq_f32(line3_5, r3_); r4_ = vaddq_f32(line4_5, r4_); r5_ = vaddq_f32(line5_5, r5_); r0 = vmulq_f32(line0_3, const5); r1 = vmulq_f32(line1_3, const5); r2 = vmulq_f32(line2_3, const5); r3 = vmulq_f32(line3_3, const5); r4 = vmulq_f32(line4_3, const5); r5 = vmulq_f32(line5_3, const5); vst1q_f32(inp_ptr + s_size * 5, vaddq_f32(r0, r0_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 11, vaddq_f32(r1, r1_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 17, vaddq_f32(r2, r2_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 23, vaddq_f32(r3, r3_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 29, vaddq_f32(r4, r4_)); // inp_ptr[ s_size * (5 + i * 6)] vst1q_f32(inp_ptr + s_size * 35, vaddq_f32(r5, r5_)); // inp_ptr[ s_size * (5 + i * 6)] r0 = vmulq_f32(line0_0, const4); r1 = vmulq_f32(line1_0, const4); r2 = vmulq_f32(line2_0, const4); r3 = vmulq_f32(line3_0, const4); r4 = vmulq_f32(line4_0, const4); r5 = vmulq_f32(line5_0, const4); r0_ = vmulq_f32(line0_2, const5); r1_ = vmulq_f32(line1_2, const5); r2_ = vmulq_f32(line2_2, const5); r3_ = vmulq_f32(line3_2, const5); r4_ = vmulq_f32(line4_2, const5); r5_ = vmulq_f32(line5_2, const5); r0 = vaddq_f32(r0, line0_4); r1 = vaddq_f32(r1, line1_4); r2 = vaddq_f32(r2, line2_4); r3 = vaddq_f32(r3, line3_4); r4 = vaddq_f32(r4, line4_4); r5 = vaddq_f32(r5, line5_4); vst1q_f32(inp_ptr + s_size * 0, vaddq_f32(r0, r0_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 6, vaddq_f32(r1, r1_)); // inp_ptr[ s_size * (1 + i * 6)] vst1q_f32(inp_ptr + s_size * 12, vaddq_f32(r2, r2_)); // 
// inp_ptr[ s_size * (1 + i * 6)]
        vst1q_f32(inp_ptr + s_size * 18, vaddq_f32(r3, r3_)); // inp_ptr[ s_size * (1 + i * 6)]
        vst1q_f32(inp_ptr + s_size * 24, vaddq_f32(r4, r4_)); // inp_ptr[ s_size * (1 + i * 6)]
        vst1q_f32(inp_ptr + s_size * 30, vaddq_f32(r5, r5_)); // inp_ptr[ s_size * (1 + i * 6)]
    // Scalar reference implementation of the vectorized input transform above,
    // kept for documentation purposes:
    // for(int i = 0; i < 6; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         r4_minus_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - mid[(i * 6 + 2) * 4 + k];
    //         r1_4_minus_r3[i * 4 + k] = 4 * mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k];
    //         r4_minus_4_r2[i * 4 + k] = mid[(i * 6 + 4) * 4 + k] - 4 * mid[(i * 6 + 2) * 4 + k];
    //         r1_minus_r3_x2[i * 4 + k] = 2 * (mid[(i * 6 + 1) * 4 + k] - mid[(i * 6 + 3) * 4 + k]);
    //     }
    // }
    // for(int i = 1; i < 2; i++)
    // {
    //     for(int k = 0; k < 4; k++)
    //     {
    //         inp_ptr[k + s_size * (i * 6)] =
    //             4 * mid[(i * 6) * 4 + k] - 5 * mid[(i * 6 + 2) * 4 + k] + mid[(i * 6 + 4) * 4 + k];
    //         // inp_ptr[k + s_size * (1 + i * 6)] = r4_minus_4_r2[i * 4 + k] - r1_4_minus_r3[i * 4 + k];
    //         // inp_ptr[k + s_size * (2 + i * 6)] = r4_minus_4_r2[i * 4 + k] + r1_4_minus_r3[i * 4 + k];
    //         // inp_ptr[k + s_size * (3 + i * 6)] = r4_minus_r2[i * 4 + k] - r1_minus_r3_x2[i * 4 + k];
    //         // inp_ptr[k + s_size * (4 + i * 6)] = r4_minus_r2[i * 4 + k] + r1_minus_r3_x2[i * 4 + k];
    //         // inp_ptr[k + s_size * (5 + i * 6)] =
    //         //     4 * mid[(i * 6 + 1) * 4 + k] - 5 * mid[(i * 6 + 3) * 4 + k] + mid[(i * 6 + 5) * 4 + k];
    //     }
    // }
}

// Transform the padded input into Winograd domain, 4 tiles at a time.
// Output layout: trans_input [block_hw/4][ELEM_SIZE][inc][4]
// Fast path: when the 4 consecutive tiles sit on one row (idxh[0] == idxh[3]),
// tran_inp_4 transforms them in one vectorized call; otherwise each tile is
// transformed individually and then interleaved into the [s][c][tile] order.
static inline void tran_input_4block(const float* input, float* trans_inp, int inc, int block_h, int block_w,
                                     int inh, int inw)
{
    int in_hw = inh * inw;
    int block_hw = block_h * block_w;
    int nn_block = block_hw >> 2; // number of complete 4-tile groups
    int idxh[4];
    int idxw[4];

    for (int ib = 0; ib < nn_block; ib++)
    {
        float* inp_ptr_4tile = trans_inp + ib * 4 * ELEM_SIZE * inc;
        // tile coordinates of the 4 tiles in this group
        idxh[0] = (ib * 4) / block_w;
        idxh[1] = (ib * 4 + 1) / block_w;
        idxh[2] = (ib * 4 + 2) / block_w;
        idxh[3] = (ib * 4 + 3) / block_w;
        idxw[0] = (ib * 4) % block_w;
        idxw[1] = (ib * 4 + 1) % block_w;
        idxw[2] = (ib * 4 + 2) % block_w;
        idxw[3] = (ib * 4 + 3) % block_w;
        if (idxh[0] == idxh[3])
        {
            // all 4 tiles on the same row: vectorized transform, tiles stride 4 apart
            float* temp_inp_ptr = (float*)(input + idxh[0] * 4 * inw + idxw[0] * 4);
            for (int c = 0; c < inc; c++)
            {
                float ker00[4] = {1, 2, 4, 5}; // transform coefficients used by tran_inp_4
                // NOTE(review): stride argument here is inc * 16 (elements), while the
                // parallel variant below passes a byte count — confirm tran_inp_4's contract.
                tran_inp_4(temp_inp_ptr, inp_ptr_4tile + 4 * c, ker00, inw, inc * 16, in_hw);
                temp_inp_ptr += in_hw;
            }
        }
        else
        {
            // tiles wrap to the next row: transform one tile at a time into a
            // per-channel buffer, then interleave
            float buffer0[inc * ELEM_SIZE * 4]; // VLA: [inc][4 tiles][ELEM_SIZE]
            float* buffer = buffer0;
            for (int c = 0; c < inc; c++)
            {
                trans_inp_1tile((float*)input, buffer, idxh[0], idxw[0], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile((float*)input, buffer, idxh[1], idxw[1], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile((float*)input, buffer, idxh[2], idxw[2], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile((float*)input, buffer, idxh[3], idxw[3], c, in_hw, inw);
                buffer += ELEM_SIZE;
            }
            // interleave: reorder [c][tile][s] -> [s][c][tile]
            float* tmp_inp = inp_ptr_4tile;
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                for (int i = 0; i < inc; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        *tmp_inp = buffer0[i * ELEM_SIZE * 4 + j * ELEM_SIZE + s];
                        tmp_inp++;
                    }
                }
            }
            // end interleave
        }
    }
}

// Parallel variant of the input transform with a different output layout:
// tran_inp [block_hw/4][36][inc][4] -> [36][block_hw/4][inc][4]
// i.e. the Winograd-element index s becomes the outermost dimension so that the
// per-s GEMMs read contiguous memory.
static inline void tran_input_4block_1(const float* input, float* trans_inp, int inc, int block_h, int block_w,
                                       int inh, int inw, int num_thread)
{
    int in_hw = inh * inw;
    int block_hw = block_h * block_w;
    int nn_block = block_hw >> 2;
    int idxh[4];
    int idxw[4];
    // NOTE(review): s_size mixes an element count with sizeof(float); it is passed
    // to tran_inp_4 as the inter-element stride — presumably that routine expects
    // bytes here. Verify against tran_inp_4's definition.
    int s_size = block_hw * inc * sizeof(float);

#pragma omp parallel for num_threads(num_thread) shared(block_hw, nn_block, in_hw) private(idxh, idxw)
    for (int ib = 0; ib < nn_block; ib++)
    {
        int off_set0 = ib * BLOCK_HW_UNIT * inc; // offset of this 4-tile group within one s-plane
        idxh[0] = (ib * 4) / block_w;
        idxh[1] = (ib * 4 + 1) / block_w;
        idxh[2] = (ib * 4 + 2) / block_w;
        idxh[3] = (ib * 4 + 3) / block_w;
        idxw[0] = (ib * 4) % block_w;
        idxw[1] = (ib * 4 + 1) % block_w;
        idxw[2] = (ib * 4 + 2) % block_w;
        idxw[3] = (ib * 4 + 3) % block_w;
        if (idxh[0] == idxh[3])
        {
            // 4 tiles on one row: vectorized transform writes strided into the s-planes
            float* temp_inp_ptr = (float*)(input + idxh[0] * 4 * inw + idxw[0] * 4);
            for (int c = 0; c < inc; c++)
            {
                float ker00[4] = {1, 2, 4, 5};
                tran_inp_4(temp_inp_ptr, trans_inp + c * 4 + off_set0, ker00, inw, s_size, in_hw);
                temp_inp_ptr += in_hw;
            }
        }
        else
        {
            // row-wrapping group: scalar per-tile transform plus explicit interleave
            float buffer0[inc * ELEM_SIZE * BLOCK_HW_UNIT]; // VLA: [inc][4 tiles][ELEM_SIZE]
            float* buffer = buffer0;
            for (int c = 0; c < inc; c++)
            {
                trans_inp_1tile((float*)input, buffer, idxh[0], idxw[0], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile((float*)input, buffer, idxh[1], idxw[1], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile((float*)input, buffer, idxh[2], idxw[2], c, in_hw, inw);
                buffer += ELEM_SIZE;
                trans_inp_1tile((float*)input, buffer, idxh[3], idxw[3], c, in_hw, inw);
                buffer += ELEM_SIZE;
            }
            // interleave: [c][tile][s] -> [s][..group..][c][tile]
            for (int s = 0; s < ELEM_SIZE; s++)
            {
                float* tmp_inp = trans_inp + s * block_hw * inc + off_set0;
                for (int i = 0; i < inc; i++)
                {
                    for (int j = 0; j < BLOCK_HW_UNIT; j++)
                    {
                        *tmp_inp = buffer0[i * ELEM_SIZE * BLOCK_HW_UNIT + j * ELEM_SIZE + s];
                        tmp_inp++;
                    }
                }
            }
            // end interleave
        }
    }
}

// Transform the leftover tiles (block_hw not divisible by 4), appending after
// the 4-tile groups in the [tile][s][c] interleaved layout.
static inline void tran_input_resi_block(const float* input, float* trans_inp, int inc, int nn_block, int resi_block,
                                         int block_hw, int block_w, int in_hw, int inw)
{
    float* inp_ptr = trans_inp + nn_block * 4 * ELEM_SIZE * inc; // start after the complete groups
    for (int ib = resi_block; ib < block_hw; ib++)
    {
        float buffer0[ELEM_SIZE * inc]; // VLA: [inc][ELEM_SIZE] for one tile
        float* buffer = buffer0;
        for (int c = 0; c < inc; c++)
        {
            int ih = ib / block_w;
            int jw = ib % block_w;
            trans_inp_1tile((float*)input, buffer, ih, jw, c, in_hw, inw);
            buffer += ELEM_SIZE;
        }
        // interleave: [c][s] -> [s][c]
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            for (int i = 0; i < inc; i++)
            {
                *inp_ptr = buffer0[i * ELEM_SIZE + s];
                inp_ptr++;
            }
        }
        // end interleave
    }
}

// Leftover-tile transform matching the [36][block_hw][inc] layout of
// tran_input_4block_1: tran_inp [block_resi][36][inc] -> [36][block_resi][inc]
// NOTE(review): parameter nn_block is unused here — the destination offset is
// derived from ib directly.
static inline void tran_input_resi_block_1(const float* input, float* trans_inp, int inc, int nn_block,
                                           int resi_block, int block_hw, int block_w, int in_hw, int inw)
{
    for (int ib = resi_block; ib < block_hw; ib++)
    {
        int off_set0 = ib * inc;
        float buffer0[ELEM_SIZE * inc]; // VLA: [inc][ELEM_SIZE] for one tile
        float* buffer = buffer0;
        for (int c = 0; c < inc; c++)
        {
            int ih = ib / block_w;
            int jw = ib % block_w;
            trans_inp_1tile((float*)input, buffer, ih, jw, c, in_hw, inw);
            buffer += ELEM_SIZE;
        }
        // interleave: scatter this tile's channels into each s-plane
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            float* tmp_inp = trans_inp + s * block_hw * inc + off_set0;
            for (int i = 0; i < inc; i++)
            {
                *tmp_inp = buffer0[i * ELEM_SIZE + s];
                tmp_inp++;
            }
        }
        // end interleave
    }
}

// Apply the fused activation: any activation >= 0 implies ReLU (clamp at 0),
// and activation == 6 additionally clamps at 6 (ReLU6). Negative activation
// codes pass the value through unchanged.
static inline float do_activation(float value, int activation)
{
    if (activation >= 0)
        value = WINO_MAX(value, 0);
    if (activation == 6)
        value = WINO_MIN(value, 6);
    return value;
}

// Winograd F(4,3) output transform for one 6x6 element tile:
// computes A^T * mid * A (4x4 result), adds the per-channel bias if given,
// applies the activation, and writes the 4x4 block into the output image at
// row stride outw.
static inline void trans_output_f43(const float* mid, float* out, int outw, const float* bias_ptr, int activation)
{
    /*
    The transform matrices (kept for reference):
    float AT[24]={
      1., 1., 1.,  1.,  1., 0.,
      0., 1., -1., 2., -2., 0.,
      0., 1., 1.,  4.,  4., 0.,
      0., 1., -1., 8., -8., 1.
    };
    float A[24]={
      1., 0.,  0.,  0.,
      1., 1.,  1.,  1.,
      1., -1., 1., -1.,
      1., 2.,  4.,  8.,
      1., -2., 4., -8.,
      0., 0.,  0.,  1.
    };
    */
    float tmp[24] = {0}; // tmp = AT * mid, a 4x6 intermediate

    // common subexpressions of the row transform
    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];

    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }
    // four consecutive output rows
    float* out0 = out;
    float* out1 = out0 + outw;
    float* out2 = out1 + outw;
    float* out3 = out2 + outw;

    // common subexpressions of the column transform (tmp * A)
    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;
    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    if (bias_ptr)
    {
        // bias branch: add the scalar bias to every element before activation
        float bias = bias_ptr[0];
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0] + bias, activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1] + bias, activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2] + bias, activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3] + bias, activation);
        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0] + bias, activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1] + bias, activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2] + bias, activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3] + bias, activation);
        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0] + bias, activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1] + bias, activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2] + bias, activation);
        out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3] + bias, activation);
        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5] + bias, activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5] + bias, activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2] + tmp[2 * 6 + 5] + bias, activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5] + bias, activation);
    }
    else
    {
        // no-bias branch: same expressions without the bias term
        out0[0] = do_activation(tmp[0 * 6] + _r1_add_r2[0] + _r3_add_r4[0], activation);
        out1[0] = do_activation(tmp[1 * 6] + _r1_add_r2[1] + _r3_add_r4[1], activation);
        out2[0] = do_activation(tmp[2 * 6] + _r1_add_r2[2] + _r3_add_r4[2], activation);
        out3[0] = do_activation(tmp[3 * 6] + _r1_add_r2[3] + _r3_add_r4[3], activation);
        out0[1] = do_activation(_r1_minus_r2[0] + _r3_minus_r4_x2[0], activation);
        out1[1] = do_activation(_r1_minus_r2[1] + _r3_minus_r4_x2[1], activation);
        out2[1] = do_activation(_r1_minus_r2[2] + _r3_minus_r4_x2[2], activation);
        out3[1] = do_activation(_r1_minus_r2[3] + _r3_minus_r4_x2[3], activation);
        out0[2] = do_activation(_r1_add_r2[0] + 4 * _r3_add_r4[0], activation);
        out1[2] = do_activation(_r1_add_r2[1] + 4 * _r3_add_r4[1], activation);
        out2[2] = do_activation(_r1_add_r2[2] + 4 * _r3_add_r4[2], activation);
        out3[2] = do_activation(_r1_add_r2[3] + 4 * _r3_add_r4[3], activation);
        out0[3] = do_activation(_r1_minus_r2[0] + 4 * _r3_minus_r4_x2[0] + tmp[0 * 6 + 5], activation);
        out1[3] = do_activation(_r1_minus_r2[1] + 4 * _r3_minus_r4_x2[1] + tmp[1 * 6 + 5], activation);
        out2[3] = do_activation(_r1_minus_r2[2] + 4 * _r3_minus_r4_x2[2] + tmp[2 * 6 + 5], activation);
        out3[3] = do_activation(_r1_minus_r2[3] + 4 * _r3_minus_r4_x2[3] + tmp[3 * 6 + 5], activation);
    }
}

// Same F(4,3) output transform, but writes the raw 4x4 result (plus bias) into
// a contiguous 4x4 buffer with no activation — used for partial edge tiles.
static inline void trans_output_f43_ordinary(const float* mid, float* out, const float* bias_ptr)
{
    /*
    float AT[24]={
      1., 1., 1.,  1.,  1., 0.,
      0., 1., -1., 2., -2., 0.,
      0., 1., 1.,  4.,  4., 0.,
      0., 1., -1., 8., -8., 1.
    };
    float A[24]={
      1., 0.,  0.,  0.,
      1., 1.,  1.,  1.,
      1., -1., 1., -1.,
      1., 2.,  4.,  8.,
      1., -2., 4., -8.,
      0., 0.,  0.,  1.
    };
    */
    float tmp[24] = {0}; // tmp = AT * mid, a 4x6 intermediate

    float r1_add_r2[6];
    float r1_minus_r2[6];
    float r3_add_r4[6];
    float r3_minus_r4_x2[6];

    // row transform (AT * mid) via shared subexpressions
    for (int j = 0; j < 6; j++)
    {
        r1_add_r2[j] = mid[6 * 1 + j] + mid[6 * 2 + j];
        r1_minus_r2[j] = mid[6 * 1 + j] - mid[6 * 2 + j];
        r3_add_r4[j] = mid[6 * 3 + j] + mid[6 * 4 + j];
        r3_minus_r4_x2[j] = (mid[6 * 3 + j] - mid[6 * 4 + j]) * 2;
    }
    for (int j = 0; j < 6; j++)
    {
        tmp[j] = mid[j] + r1_add_r2[j] + r3_add_r4[j];
        tmp[6 + j] = r1_minus_r2[j] + r3_minus_r4_x2[j];
        tmp[12 + j] = r1_add_r2[j] + 4 * r3_add_r4[j];
        tmp[18 + j] = r1_minus_r2[j] + 4 * r3_minus_r4_x2[j] + mid[6 * 5 + j];
    }

    // column transform (tmp * A)
    float _r1_add_r2[4];
    float _r1_minus_r2[4];
    float _r3_add_r4[4];
    float _r3_minus_r4_x2[4];
    int idx;
    for (int j = 0; j < 4; j++)
    {
        idx = 6 * j;
        _r1_add_r2[j] = tmp[idx + 1] + tmp[idx + 2];
        _r1_minus_r2[j] = tmp[idx + 1] - tmp[idx + 2];
        _r3_add_r4[j] = tmp[idx + 3] + tmp[idx + 4];
        _r3_minus_r4_x2[j] = (tmp[idx + 3] - tmp[idx + 4]) * 2;
    }
    if (bias_ptr)
    {
        float bias = bias_ptr[0];
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = bias + tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = bias + _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = bias + _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = bias + _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
    else
    {
        for (int j = 0; j < 4; j++)
        {
            idx = j * 4;
            out[idx] = tmp[j * 6] + _r1_add_r2[j] + _r3_add_r4[j];
            out[idx + 1] = _r1_minus_r2[j] + _r3_minus_r4_x2[j];
            out[idx + 2] = _r1_add_r2[j] + 4 * _r3_add_r4[j];
            out[idx + 3] = _r1_minus_r2[j] + 4 * _r3_minus_r4_x2[j] + tmp[j * 6 + 5];
        }
    }
}

// Inverse-transform one tile for KER_COUT_UNIT_ consecutive output channels.
// Interior tiles are written straight to the output image; tiles that overhang
// the right/bottom edge (resi_w/resi_h) go through a temporary 4x4 buffer and
// only the valid region is copied out.
static inline void transform_output_f43_1tile(const float* buffer_ptr, float* out, int p_idx, int idx_blockhw,
                                              int block_h, int block_w, int out_hw, int outw, int resi_h, int resi_w,
                                              int KER_COUT_UNIT_, const float* bias, int activation)
{
    float tmp_buffer[TILE * TILE];
    const float* bias_ptr = NULL;
    for (int p = 0; p < KER_COUT_UNIT_; p++)
    {
        int cout_idx = p_idx + p;
        if (bias)
        {
            bias_ptr = (bias + cout_idx);
        }
        float* out_ptr = out + cout_idx * out_hw;
        int i_h = idx_blockhw / block_w;
        int j_w = idx_blockhw % block_w;
        // tile is fully inside the output if there is no residue in its direction
        // or it is not the last tile of its row/column
        if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
        {
            trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation);
        }
        else
        {
            // edge tile: compute valid extent, transform into tmp_buffer, copy the
            // valid ret_h x ret_w region with activation applied
            int ret_h = TILE - resi_h;
            if (i_h < block_h - 1)
                ret_h = TILE;
            int ret_w = TILE - resi_w;
            if (j_w < block_w - 1)
                ret_w = TILE;
            // tmp_buffer
            trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr);
            float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE);
            for (int hh = 0; hh < ret_h; hh++)
            {
                for (int ww = 0; ww < ret_w; ww++)
                {
                    out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * TILE + ww], activation);
                }
            }
        }
        buffer_ptr += ELEM_SIZE; // next channel's transformed tile
    }
}

// Inverse-transform a group of 4 consecutive tiles for KER_COUT_UNIT_
// output channels; same interior/edge split as the 1-tile variant.
static inline void transform_output_f43_4tile(float* buffer_ptr, float* out, int p_idx, int block_idx, int block_h,
                                              int block_w, int outh, int outw, int resi_h, int resi_w,
                                              int KER_COUT_UNIT_, const float* bias, int activation)
{
    int out_hw = outh * outw;
    float tmp_buffer[TILE * TILE];
    int idx_h[4];
    int idx_w[4];
    idx_h[0] = (block_idx) / block_w;
    idx_h[1] = (block_idx + 1) / block_w;
    idx_h[2] = (block_idx + 2) / block_w;
    idx_h[3] = (block_idx + 3) / block_w;
    idx_w[0] = (block_idx) % block_w;
    idx_w[1] = (block_idx + 1) % block_w;
    idx_w[2] = (block_idx + 2) % block_w;
    idx_w[3] = (block_idx + 3) % block_w;
    float* bias_ptr = NULL;
    for (int p = 0; p < KER_COUT_UNIT_; p++)
    {
        int cout_idx = p_idx + p;
        float* out_ptr = out + cout_idx * out_hw;
        if (bias)
        {
            bias_ptr = (float*)bias + cout_idx;
        }
        for (int ii = 0; ii < 4; ii++)
        {
            int i_h = idx_h[ii];
            int j_w = idx_w[ii];
            if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
            {
                trans_output_f43(buffer_ptr, out_ptr + (i_h * TILE * outw + j_w * TILE), outw, bias_ptr, activation);
            } // direct use_out_ptr
            else
            {
                int ret_h = TILE - resi_h;
                if (i_h < block_h - 1)
                    ret_h = TILE;
                int ret_w = TILE - resi_w;
                if (j_w < block_w - 1)
                    ret_w = TILE;
                // tmp_buffer
                trans_output_f43_ordinary(buffer_ptr, tmp_buffer, bias_ptr);
                float* out_pointer = out_ptr + (i_h * TILE * outw + j_w * TILE);
                for (int hh = 0; hh < ret_h; hh++)
                {
                    for (int ww = 0; ww < ret_w; ww++)
                    {
                        // NOTE(review): indexes tmp_buffer with stride 4 while the 1-tile
                        // variant uses TILE — identical only if TILE == 4; confirm.
                        out_pointer[hh * outw + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation);
                    }
                }
            } // end else, tmp_buff
            buffer_ptr += ELEM_SIZE;
        }
    }
}

// GEMM for one Winograd element s across all tiles, 16 output channels per
// iteration. Layouts:
// trans_input [block_hw/4][ELEM_SIZE][inc][4]
// kernel [out_c/PER_OUT_CHAN][ELEM_SIZE][in_c][PER_OUT_CHAN]
static void wino_sgemm_4x16_1(const float* ker, const float* inp, float* output, int cin, int cout_end, int block_h,
                              int block_w, int out_c, int num_thread, int s, int cpu_affinity)
{
    int block_hw = block_h * block_w;
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < (cout_end & -PER_OUT_CHAN); p += PER_OUT_CHAN)
    {
        float* out_ptr = output + p * ELEM_SIZE * block_hw;
        float* out_ptr1;
        int i;
        // 4-tile micro-kernel over the complete groups
        for (i = 0; i < (block_hw & -4); i += 4)
        {
            out_ptr1 = out_ptr + i * ELEM_SIZE * KER_COUT_UNIT;
            int offset = s * block_hw * cin + i * cin;
            int offset_ker = s * cin * out_c + p * cin;
            //#ifdef __aarch64__
            wino_sgemm_4x16_A72(out_ptr1 + s * BLOCK_HW_UNIT, inp + offset, ker + offset_ker, cin, 1);
        }
        // 1-tile micro-kernel for the remainder
        for (; i < block_hw; i++)
        {
            out_ptr1 = out_ptr + i * ELEM_SIZE * KER_COUT_UNIT;
            int offset_ker = s * cin * out_c + p * cin;
            int offset = s * block_hw * cin + i * cin;
            wino_sgemm_1x16(out_ptr1 + s * KER_COUT_UNIT, inp + offset, ker + offset_ker, cin);
        }
    }
}

// GEMM tail for the output channels not covered by the 16-wide kernel:
// 4-wide micro-kernel first, then a scalar loop for the last (< 4) channels.
void wino_sgemm_4x4_1(const float* ker, const float* inp, float* output, int cin, int cout_start, int cout_end,
                      int block_h, int block_w, int out_c, int activation, int s, int num_thread, int cpu_affinity)
{
    int block_start = 0;
    int block_hw = block_h * block_w;
    int block_end = block_hw;

#pragma omp parallel for num_threads(num_thread)
    for (int p = (cout_start & -KER_COUT_UNIT4); p < (cout_end & -KER_COUT_UNIT4); p +=
         KER_COUT_UNIT4)
    {
        float* out_ptr = output + p * ELEM_SIZE * block_hw;
        int i = 0;
        // 4-tile micro-kernel over complete tile groups
        for (i = (block_start & -4); i < (block_end & -4); i += 4)
        {
            float* out_ptr1 = out_ptr + i * ELEM_SIZE * KER_COUT_UNIT4;
            int offset = s * block_hw * cin + i * cin;
            int offset_ker = s * cin * out_c + p * cin;
            //#ifdef __aarch64__
            wino_sgemm_4x4_A72(out_ptr1 + s * BLOCK_HW_UNIT, inp + offset, ker + offset_ker, cin, 1);
        }
        // remainder tiles, one at a time
        for (; i < block_end; i++)
        {
            float* out_ptr1 = out_ptr + i * ELEM_SIZE * KER_COUT_UNIT4;
            int offset_ker = s * cin * out_c + p * cin;
            int offset = s * block_hw * cin + i * cin;
            wino_sgemm_1x4(out_ptr1 + s * KER_COUT_UNIT4, inp + offset, ker + offset_ker, cin);
        }
    }
    // scalar fallback for the final (< KER_COUT_UNIT4) output channels
    for (int p = (cout_end & -KER_COUT_UNIT4); p < cout_end; p++)
    {
        float* out_ptr = output + p * ELEM_SIZE * block_hw;
        float* ker_ = (float*)(ker + s * cin * out_c + p * cin);
        int i = 0;
        for (i = (block_start & -4); i < (block_end & -4); i += 4)
        {
            float* out_ptr1 = out_ptr + i * ELEM_SIZE + s * BLOCK_HW_UNIT;
            float* inp_ = (float*)(inp + s * block_hw * cin + i * cin);
            // dot products for the 4 tiles of this group (input is interleaved [c][4])
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            for (int k = 0; k < cin; k++)
            {
                sum0 += inp_[k * 4] * ker_[k];
                sum1 += inp_[k * 4 + 1] * ker_[k];
                sum2 += inp_[k * 4 + 2] * ker_[k];
                sum3 += inp_[k * 4 + 3] * ker_[k];
            }
            out_ptr1[0] = sum0;
            out_ptr1[1] = sum1;
            out_ptr1[2] = sum2;
            out_ptr1[3] = sum3;
        }
        for (; i < block_end; i++)
        {
            float* out_ptr1 = out_ptr + i * ELEM_SIZE + s;
            float* inp_ = (float*)(inp + s * block_hw * cin + i * cin);
            float sum0 = 0;
            for (int k = 0; k < cin; k++)
            {
                sum0 += inp_[k] * ker_[k];
            }
            out_ptr1[0] = sum0;
        }
    }
}

/* transform output */
// Inverse-transform the GEMM result for one group of KER_COUT_UNIT_ output
// channels starting at channel p. Uses the vectorized 4-tile path (tran_out_4)
// when 4 consecutive tiles lie on one interior row and the output row is wide
// enough (out_w >= 16); otherwise de-interleaves into a scratch buffer and
// falls back to the scalar per-tile transforms.
static inline void trans_output_p(float* trans_out_ptr, float* output, float* bias, int bias_term, int block_h,
                                  int block_w, int block_hw, int out_hw, int out_w, int resi_h, int resi_w,
                                  int activation, int p, int KER_COUT_UNIT_)
{
    int flag_outw = 1;
    if (out_w < 16)
        flag_outw = 0; // narrow outputs cannot use the 4-tile vector store
    int i;
    for (i = 0; i < (block_hw & -BLOCK_HW_UNIT); i += BLOCK_HW_UNIT)
    {
        float* buffer_ptr = trans_out_ptr + i * KER_COUT_UNIT_ * ELEM_SIZE;
        int idx_h[4];
        int idx_w[4];
        idx_h[0] = (i) / block_w;
        idx_h[1] = (i + 1) / block_w;
        idx_h[2] = (i + 2) / block_w;
        idx_h[3] = (i + 3) / block_w;
        idx_w[0] = (i) % block_w;
        idx_w[1] = (i + 1) % block_w;
        idx_w[2] = (i + 2) % block_w;
        idx_w[3] = (i + 3) % block_w;
        int wino_out_4_tiles = 0;
        if (flag_outw)
        {
            // eligible only if all 4 tiles are on the same, non-edge row
            if ((idx_h[0] == idx_h[3]) && (idx_h[0] < (block_h - 1)) && (idx_w[3] < (block_w - 1)))
            {
                wino_out_4_tiles = 1;
            }
        }
        if (wino_out_4_tiles == 1)
        {
            // vectorized path: one call transforms and stores all 4 tiles
            float* bias_ptr = NULL;
            for (int pss = 0; pss < KER_COUT_UNIT_; pss++)
            {
                int cout_idx = p + pss;
                float* out_ptr = output + cout_idx * out_hw + idx_h[0] * TILE * out_w + idx_w[0] * TILE;
                if (bias_term)
                {
                    bias_ptr = (float*)(bias + cout_idx);
                }
                float ker00[4] = {2, 4, 8, 0}; // output-transform coefficients used by tran_out_4
                tran_out_4(buffer_ptr + pss * ELEM_SIZE * BLOCK_HW_UNIT, out_ptr, out_w * sizeof(float), ker00,
                           bias_ptr, activation);
            }
        }
        else
        {
            // scalar path: de-interleave [s][tile] -> [tile][s] then transform each tile
            float tmp_buffer[TILE * TILE];
            const float* bias_ptr = NULL;
            for (int pss = 0; pss < KER_COUT_UNIT_; pss++)
            {
                int cout_idx = p + pss;
                float* out_ptr = output + cout_idx * out_hw;
                if (bias_term)
                {
                    bias_ptr = bias + cout_idx;
                }
                float buffer[BLOCK_HW_UNIT * ELEM_SIZE];
                float* buffer_ptr0 = buffer;
                float* mid_ptr = buffer_ptr + pss * BLOCK_HW_UNIT * ELEM_SIZE;
                for (int t = 0; t < BLOCK_HW_UNIT; t++)
                {
                    for (int ss = 0; ss < ELEM_SIZE; ss++)
                    {
                        *buffer_ptr0 = mid_ptr[ss * BLOCK_HW_UNIT + t];
                        buffer_ptr0++;
                    }
                }
                for (int ii = 0; ii < BLOCK_HW_UNIT; ii++)
                {
                    int i_h = idx_h[ii];
                    int j_w = idx_w[ii];
                    if ((resi_h == 0 && resi_w == 0) || (resi_h == 0 && (j_w < block_w - 1)) || (resi_w == 0 && (i_h < block_h - 1)) || ((j_w < block_w - 1) && (i_h < block_h - 1)))
                    {
                        trans_output_f43(buffer + ii * ELEM_SIZE, out_ptr + (i_h * TILE * out_w + j_w * TILE), out_w,
                                         (const float*)bias_ptr, activation);
                    }
                    else
                    {
                        // edge tile: copy only the valid region, applying activation
                        int ret_h = TILE - resi_h;
                        if (i_h < block_h - 1)
                            ret_h = TILE;
                        int ret_w = TILE - resi_w;
                        if (j_w < block_w - 1)
                            ret_w = TILE;
                        trans_output_f43_ordinary(buffer + ii * ELEM_SIZE, tmp_buffer, (const float*)bias_ptr);
                        float* out_pointer = out_ptr + (i_h * TILE * out_w + j_w * TILE);
                        for (int hh = 0; hh < ret_h; hh++)
                        {
                            for (int ww = 0; ww < ret_w; ww++)
                            {
                                out_pointer[hh * out_w + ww] = do_activation(tmp_buffer[hh * 4 + ww], activation);
                            }
                        }
                    }
                }
            }
        }
    }
    // leftover tiles (block_hw not divisible by BLOCK_HW_UNIT)
    for (; i < block_hw; i++)
    {
        float* buffer_ptr = trans_out_ptr + i * KER_COUT_UNIT_ * ELEM_SIZE;
        float resi_buffer[KER_COUT_UNIT_ * ELEM_SIZE]; // VLA: de-interleaved [channel][s]
        float* buffer0 = resi_buffer;
        for (int pp = 0; pp < KER_COUT_UNIT_; pp++)
        {
            for (int ss = 0; ss < ELEM_SIZE; ss++)
            {
                *buffer0 = buffer_ptr[ss * KER_COUT_UNIT_ + pp];
                buffer0++;
            }
        }
        transform_output_f43_1tile(resi_buffer, output, p, i, block_h, block_w, out_hw, out_w, resi_h, resi_w,
                                   KER_COUT_UNIT_, bias, activation);
    }
}

// transform output
// Dispatch the output transform over all output channels in three passes:
// groups of KER_COUT_UNIT (16), then KER_COUT_UNIT4 (4), then single channels.
static inline void trans_output_1(float* trans_out, float* output, float* bias, int bias_term, int block_h,
                                  int block_w, int cout_start, int cout_end, int out_hw, int out_w, int resi_h,
                                  int resi_w, int activation, int num_thread)
{
    int block_hw = block_h * block_w;
    int p;
    //cout 16
#pragma omp parallel for num_threads(num_thread) shared(block_hw)
    for (p = cout_start; p < (cout_end & -KER_COUT_UNIT); p += KER_COUT_UNIT)
    {
        trans_output_p(trans_out + p * block_hw * ELEM_SIZE, output, bias, bias_term, block_h, block_w, block_hw,
                       out_hw, out_w, resi_h, resi_w, activation, p, KER_COUT_UNIT);
    }
    //cout 4
#pragma omp parallel for num_threads(num_thread) shared(block_hw)
    for (p = (cout_end & -KER_COUT_UNIT); p < (cout_end & -KER_COUT_UNIT4); p += KER_COUT_UNIT4)
    {
        trans_output_p(trans_out + p * block_hw * ELEM_SIZE, output, bias, bias_term, block_h, block_w, block_hw,
                       out_hw, out_w, resi_h, resi_w, activation, p, KER_COUT_UNIT4);
    }
    // cout 1
#pragma omp parallel for num_threads(num_thread) shared(block_hw)
    for (p = (cout_end & -KER_COUT_UNIT4); p < cout_end; p++)
    {
        trans_output_p(trans_out + p * block_hw * ELEM_SIZE, output, bias, bias_term, block_h, block_w, block_hw,
                       out_hw, out_w, resi_h, resi_w, activation, p, 1);
    }
}

// Size in bytes of the per-layer private buffer (transformed kernel).
static int
get_private_mem_size(struct tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128; // caution
}

// Pre-run: transform the convolution kernel into Winograd domain and
// interleave it into the layout the GEMM micro-kernels expect, storing the
// result in priv_info->interleave_buffer. Returns 0 on success.
int wino_conv_hcl_prerun_1(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor,
                           struct conv_priv_info* priv_info, struct conv_param* param)
{
    // fTLOG_ERR(stderr,"run into wino_1 prerun.\n");
    int output_c = filter_tensor->dims[0];
    int input_c = filter_tensor->dims[1];
    int mem_size = get_private_mem_size(filter_tensor, param);
    // NOTE(review): sys_malloc results are not checked here — presumably the
    // project treats allocation failure as fatal; confirm against sys_malloc.
    float* trans_mem = (float*)sys_malloc(mem_size);

    if (!priv_info->external_interleave_mem)
    {
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    transform_kernel_f43_tile(filter_tensor, trans_mem);
    interleave_kernel_1(trans_mem, (float*)priv_info->interleave_buffer, output_c, input_c);

    sys_free(trans_mem);
    return 0;
}

// Run one Winograd F(4,3) convolution:
// pad input -> input transform -> per-element GEMMs -> output transform.
// Assumes NCHW tensors and a pre-interleaved kernel from the prerun. Returns 0.
int wino_conv_hcl_run_1(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor,
                        struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                        int num_thread, int cpu_affinity)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    // pad
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;
    // input
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w; // NOTE(review): unused below
    // output
    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int out_hw = out_h * out_w;
    int output_size = out_c * out_h * out_w;

    /* wino param */
    int block_h = (out_h + TILE - 1) / TILE; // tiles per column (ceil division)
    int block_w = (out_w + TILE - 1) / TILE; // tiles per row
    int block_hw = block_h * block_w;
    int padded_in_h = block_h * TILE + 2; // +2 for the 6x6 input tile overlap
    int padded_in_w = block_w * TILE + 2;
    int padded_in_hw = padded_in_h * padded_in_w;

    /* buffer addr */
    float* input_buf = (float*)input_tensor->data;
    float* output_buf = (float*)output_tensor->data;
    float* biases_buf = NULL;
    int bias_term = 0;
    if (bias_tensor != NULL)
    {
        biases_buf = (float*)bias_tensor->data;
        bias_term = 1;
    }
    float* col_buf = (float*)priv_info->im2col_buffer; // NOTE(review): unused below
    float* interleave_buf = (float*)priv_info->interleave_buffer;

    int inp_padded_size = sizeof(float) * (in_c * padded_in_hw + 2);
    int nn_out_c = (out_c / PER_OUT_CHAN) * PER_OUT_CHAN; // channels covered by the 16-wide GEMM
    int nn_block = block_hw >> 2;
    int resi_block = nn_block << 2; // first tile not in a complete 4-group
    int resi_h = block_h * TILE - out_h; // bottom overhang in pixels
    int resi_w = block_w * TILE - out_w; // right overhang in pixels

    for (int n = 0; n < batch; n++)
    {
        // per-image scratch buffers; NOTE(review): sys_malloc unchecked here too
        float* input_padded = (float*)sys_malloc(inp_padded_size);
        float* trans_inp = (float*)sys_malloc(sizeof(float) * ELEM_SIZE * in_c * block_hw + 128);
        float* trans_out = (float*)sys_malloc(sizeof(float) * ELEM_SIZE * out_c * block_hw);
        float* input = input_buf + n * input_size;
        float* output = output_buf + n * output_size;
        /* PAD input */
        pad_input1(input, input_padded, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_h0, pad_w0);
        /* trans input */
        tran_input_4block_1(input_padded, trans_inp, in_c, block_h, block_w, padded_in_h, padded_in_w, num_thread);
        if (resi_block != block_hw)
        {
            tran_input_resi_block_1(input_padded, trans_inp, in_c, nn_block, resi_block, block_hw, block_w,
                                    padded_in_hw, padded_in_w);
        }
        sys_free(input_padded);
        /* gemm: one multiplication per Winograd element s */
        for (int s = 0; s < ELEM_SIZE; s++)
        {
            wino_sgemm_4x16_1(interleave_buf, trans_inp, trans_out, in_c, nn_out_c, block_h, block_w, out_c,
                              num_thread, s, cpu_affinity);
            if (nn_out_c != out_c)
            {
                wino_sgemm_4x4_1(interleave_buf, trans_inp, trans_out, in_c, nn_out_c, out_c, block_h, block_w,
                                 out_c, act_type, s, num_thread, cpu_affinity);
            }
        }
        sys_free(trans_inp);
        trans_output_1(trans_out, output, biases_buf, bias_term, block_h, block_w, 0, out_c, out_hw, out_w, resi_h,
                       resi_w, act_type, num_thread);
        sys_free(trans_out);
    }
    return 0;
}
#endif
/* ===== concatenated file boundary: GB_binop__times_uint8.c ===== */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_01__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_03__times_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__times_uint8)
// A*D function (colscale):         GB (_AxD__times_uint8)
// D*A function (rowscale):         GB (_DxB__times_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__times_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__times_uint8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__times_uint8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__times_uint8)
// C=scalar+B                       GB (_bind1st__times_uint8)
// C=scalar+B'                      GB (_bind1st_tran__times_uint8)
// C=A+scalar                       GB (_bind2nd__times_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__times_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij * bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] (A_iso presumably selects the single iso value — see GBX)
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x * y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__times_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this second return is unreachable — appears to be a
    // harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__times_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__times_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (if B is bitmap)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__times_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if A is bitmap)
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x * aij) ; \
}

GrB_Info GB (_bind1st_tran__times_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij * y) ; \
}

GrB_Info GB (_bind2nd_tran__times_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mandelbrot.c
/* Stress test can not find put why its not working on newriver To compile: gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp Or just type: module load gcc make To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads): ./mandelbrot 4096 4096 1 */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "png_util.h" // Q2a: add include for OpenMP header file here: #include <omp.h> #define MXITER 1000 typedef struct { double r; double i; }complex_t; // return iterations before z leaves mandelbrot set for given c int testpoint(complex_t c){ int iter; complex_t z; double temp; z = c; for(iter=0; iter<MXITER; iter++){ temp = (z.r*z.r) - (z.i*z.i) + c.r; z.i = z.r*z.i*2. + c.i; z.r = temp; if((z.r*z.r+z.i*z.i)>4.0){ return iter; } } return iter; } // perform Mandelbrot iteration on a grid of numbers in the complex plane // record the iteration counts in the count array void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){ int n,m; complex_t c; double dr = (cmax.r-cmin.r)/(Nre-1); double di = (cmax.i-cmin.i)/(Nim-1);; // Q2c: add a compiler directive to split the outer for loop amongst threads here #pragma omp parallel for private(m,c) shared(dr, di) for(n=0;n<Nim;++n){ for(m=0;m<Nre;++m){ c.r = cmin.r + dr*m; c.i = cmin.i + di*n; count[m+n*Nre] = testpoint(c); } } } int main(int argc, char **argv){ // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ] // usage: ./mandelbrot 4096 4096 1 int Nre = atoi(argv[1]); int Nim = atoi(argv[2]); int Nthreads = atoi(argv[argc - 1]); // Q2b: set the number of OpenMP threads to be Nthreads here: omp_set_num_threads(Nthreads); // storage for the iteration counts float *count = (float*) malloc(Nre*Nim*sizeof(float)); // Parameters for a bounding box for "c" that generates an interesting image const float centRe = -.759856, centIm= .125547; const float diam = 0.151579; complex_t cmin; complex_t cmax; cmin.r 
= centRe - 0.5*diam; cmax.r = centRe + 0.5*diam; cmin.i = centIm - 0.5*diam; cmax.i = centIm + 0.5*diam; // Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time double start = omp_get_wtime(); // compute mandelbrot set mandelbrot(Nre, Nim, cmin, cmax, count); // Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time double end = omp_get_wtime(); // print elapsed time printf("elapsed = %g\n", end-start); // output mandelbrot to png format image FILE *fp = fopen("mandelbrot.png", "w"); write_hot_png(fp, Nre, Nim, count, 0, 80); exit(0); return 0; }
parallel_if0.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"

// OMPT lit test: `if(0)` forces both parallel regions to be serialized
// (executed by the encountering thread with team size 1); the CHECK lines
// below pin the exact callback sequence and task/frame nesting that the
// runtime must report for these serialized regions and the explicit task.
int main() {
  // print_frame(0);
#pragma omp parallel if(0)
  {
    // print_frame(1);
    print_ids(0);
    print_ids(1);
    // print_frame(0);
#pragma omp parallel if(0)
    {
      // print_frame(1);
      print_ids(0);
      print_ids(1);
      print_ids(2);
      // print_frame(0);
#pragma omp task
      {
        // print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
        print_ids(3);
      }
    }
    print_fuzzy_address(1);
  }
  print_fuzzy_address(2);

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[NESTED_IMPLICIT_TASK_ID]], second_task_id=[[EXPLICIT_TASK_ID]], prior_task_status=ompt_task_switch=7
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[EXPLICIT_TASK_ID]], second_task_id=[[NESTED_IMPLICIT_TASK_ID]], prior_task_status=ompt_task_complete=1
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[EXPLICIT_TASK_ID]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

  return 0;
}
maxpool_layer.c
#include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "gemm.h"
#include <float.h>   // FLT_MAX
#include <stdio.h>
#include <stdlib.h>  // calloc, realloc, exit
#include <string.h>  // memcpy

// Return the layer's output buffer wrapped as an image (w x h x c view).
image get_maxpool_image(maxpool_layer l)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    return float_to_image(w, h, c, l.output);
}

// Return the layer's delta (gradient) buffer wrapped as an image.
image get_maxpool_delta(maxpool_layer l)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    return float_to_image(w, h, c, l.delta);
}

// Create/refresh the cuDNN pooling and tensor descriptors for this layer.
// No-op unless compiled with CUDNN.
void cudnn_maxpool_setup(layer *l)
{
#ifdef CUDNN
    cudnnStatus_t maxpool_status;

    maxpool_status = cudnnCreatePoolingDescriptor(&l->poolingDesc);

    maxpool_status = cudnnSetPooling2dDescriptor(
        l->poolingDesc,
        CUDNN_POOLING_MAX,
        CUDNN_NOT_PROPAGATE_NAN,    // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
        l->size,
        l->size,
        l->pad/2, //0, //l.pad,
        l->pad/2, //0, //l.pad,
        l->stride_x,
        l->stride_y);

    cudnnCreateTensorDescriptor(&l->srcTensorDesc);
    cudnnCreateTensorDescriptor(&l->dstTensorDesc);
    cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w);
    cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
#endif // CUDNN
}

// Build a maxpool layer.
//   maxpool_depth != 0 : pool across channels (output is out_channels deep,
//                        spatial size unchanged) instead of spatially.
//   antialiasing != 0  : pool with stride 1, then blur+downsample with a
//                        fixed 3x3 Gaussian convolution at the requested stride.
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing)
{
    maxpool_layer l = { (LAYER_TYPE)0 };
    l.type = MAXPOOL;

    // remember the requested stride for the blur layer before forcing stride 1
    const int blur_stride_x = stride_x;
    const int blur_stride_y = stride_y;
    l.antialiasing = antialiasing;
    if (antialiasing) {
        stride_x = stride_y = l.stride = l.stride_x = l.stride_y = 1; // use stride=1 in host-layer
    }

    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.maxpool_depth = maxpool_depth;
    l.out_channels = out_channels;
    if (maxpool_depth) {
        l.out_c = out_channels;
        l.out_w = l.w;
        l.out_h = l.h;
    }
    else {
        l.out_w = (w + padding - size) / stride_x + 1;
        l.out_h = (h + padding - size) / stride_y + 1;
        l.out_c = c;
    }
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride_x;
    l.stride_x = stride_x;
    l.stride_y = stride_y;

    int output_size = l.out_h * l.out_w * l.out_c * batch;
    l.indexes = (int*)calloc(output_size, sizeof(int));
    l.output = (float*)calloc(output_size, sizeof(float));
    l.delta = (float*)calloc(output_size, sizeof(float));
    l.forward = forward_maxpool_layer;
    l.backward = backward_maxpool_layer;
#ifdef GPU
    l.forward_gpu = forward_maxpool_layer_gpu;
    l.backward_gpu = backward_maxpool_layer_gpu;
    l.indexes_gpu = cuda_make_int_array(output_size);
    l.output_gpu = cuda_make_array(l.output, output_size);
    l.delta_gpu = cuda_make_array(l.delta, output_size);
    cudnn_maxpool_setup(&l);
#endif  // GPU

    l.bflops = (l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
    if (maxpool_depth)
        fprintf(stderr, "max-depth %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
    else if (stride_x == stride_y)
        fprintf(stderr, "max %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
    else
        fprintf(stderr, "max %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);

    if (l.antialiasing) {
        printf("AA: ");
        l.input_layer = (layer*)calloc(1, sizeof(layer));
        const int blur_size = 3;
        *(l.input_layer) = make_convolutional_layer(batch, 1, l.out_h, l.out_w, l.out_c, l.out_c, l.out_c, blur_size, blur_stride_x, blur_stride_y, 1, blur_size / 2, LINEAR, 0, 0, 0, 0, 0, 1, 0, NULL, 0);
        const int blur_nweights = l.out_c * blur_size * blur_size;  // (n / n) * n * blur_size * blur_size;
        int i;
        // fixed 3x3 Gaussian blur kernel (sums to 1), one per output channel
        for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
            l.input_layer->weights[i + 0] = 1 / 16.f;
            l.input_layer->weights[i + 1] = 2 / 16.f;
            l.input_layer->weights[i + 2] = 1 / 16.f;

            l.input_layer->weights[i + 3] = 2 / 16.f;
            l.input_layer->weights[i + 4] = 4 / 16.f;
            l.input_layer->weights[i + 5] = 2 / 16.f;

            l.input_layer->weights[i + 6] = 1 / 16.f;
            l.input_layer->weights[i + 7] = 2 / 16.f;
            l.input_layer->weights[i + 8] = 1 / 16.f;
        }
        for (i = 0; i < l.out_c; ++i) l.input_layer->biases[i] = 0;
#ifdef GPU
        l.input_antialiasing_gpu = cuda_make_array(NULL, l.batch*l.outputs);
        push_convolutional_layer(*(l.input_layer));
#endif  // GPU
    }

    return l;
}

// Resize the layer's spatial dimensions and regrow its buffers.
void resize_maxpool_layer(maxpool_layer *l, int w, int h)
{
    l->h = h;
    l->w = w;
    l->inputs = h*w*l->c;

    l->out_w = (w + l->pad - l->size) / l->stride_x + 1;
    l->out_h = (h + l->pad - l->size) / l->stride_y + 1;
    l->outputs = l->out_w * l->out_h * l->out_c;
    int output_size = l->outputs * l->batch;

    // realloc via temporaries: assigning the result straight back would lose
    // (and leak) the original buffers if realloc fails
    int *new_indexes = (int*)realloc(l->indexes, output_size * sizeof(int));
    float *new_output = (float*)realloc(l->output, output_size * sizeof(float));
    float *new_delta = (float*)realloc(l->delta, output_size * sizeof(float));
    if (!new_indexes || !new_output || !new_delta) {
        fprintf(stderr, "resize_maxpool_layer: out of memory\n");
        exit(EXIT_FAILURE);
    }
    l->indexes = new_indexes;
    l->output = new_output;
    l->delta = new_delta;

#ifdef GPU
    CHECK_CUDA(cudaFree((float *)l->indexes_gpu));
    CHECK_CUDA(cudaFree(l->output_gpu));
    CHECK_CUDA(cudaFree(l->delta_gpu));
    l->indexes_gpu = cuda_make_int_array(output_size);
    l->output_gpu = cuda_make_array(l->output, output_size);
    l->delta_gpu = cuda_make_array(l->delta, output_size);

    cudnn_maxpool_setup(l);
#endif
}

// Forward pass: per output element, take the max over the pooling window and
// record the argmax input index in l.indexes for the backward pass.
void forward_maxpool_layer(const maxpool_layer l, network_state state)
{
    if (l.maxpool_depth) {
        // pool across channels: window is every out_c-th channel at (i,j)
        int b;
        for (b = 0; b < l.batch; ++b) {
            // loop variables are declared inside so each OpenMP thread gets
            // private copies (declaring them outside made j/g shared: a race)
            #pragma omp parallel for
            for (int i = 0; i < l.h; ++i) {
                for (int j = 0; j < l.w; ++j) {
                    for (int g = 0; g < l.out_c; ++g) {
                        int out_index = j + l.w*(i + l.h*(g + l.out_c*b));
                        float max = -FLT_MAX;
                        int max_i = -1;
                        for (int k = g; k < l.c; k += l.out_c) {
                            int in_index = j + l.w*(i + l.h*(k + l.c*b));
                            float val = state.input[in_index];
                            max_i = (val > max) ? in_index : max_i;
                            max = (val > max) ? val : max;
                        }
                        l.output[out_index] = max;
                        l.indexes[out_index] = max_i;
                    }
                }
            }
        }
        return;
    }

    if (!state.train && l.stride_x == l.stride_y) {
        // fast path for inference with square stride
        forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch);
    }
    else {
        int w_offset = -l.pad / 2;
        int h_offset = -l.pad / 2;

        int h = l.out_h;
        int w = l.out_w;
        int c = l.c;

        int b, i, j, k, m, n;
        for (b = 0; b < l.batch; ++b) {
            for (k = 0; k < c; ++k) {
                for (i = 0; i < h; ++i) {
                    for (j = 0; j < w; ++j) {
                        int out_index = j + w*(i + h*(k + c*b));
                        float max = -FLT_MAX;
                        int max_i = -1;
                        for (n = 0; n < l.size; ++n) {
                            for (m = 0; m < l.size; ++m) {
                                int cur_h = h_offset + i*l.stride_y + n;
                                int cur_w = w_offset + j*l.stride_x + m;
                                int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                                // out-of-bounds (padding) positions count as -FLT_MAX
                                int valid = (cur_h >= 0 && cur_h < l.h &&
                                             cur_w >= 0 && cur_w < l.w);
                                float val = (valid != 0) ? state.input[index] : -FLT_MAX;
                                max_i = (val > max) ? index : max_i;
                                max = (val > max) ? val : max;
                            }
                        }
                        l.output[out_index] = max;
                        l.indexes[out_index] = max_i;
                    }
                }
            }
        }
    }

    if (l.antialiasing) {
        // run the fixed blur convolution over the stride-1 maxpool output
        network_state s = { 0 };
        s.train = state.train;
        s.workspace = state.workspace;
        s.net = state.net;
        s.input = l.output;
        forward_convolutional_layer(*(l.input_layer), s);
        //simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing);
        memcpy(l.output, l.input_layer->output, l.input_layer->outputs * l.input_layer->batch * sizeof(float));
    }
}

// Backward pass: route each output gradient to the input element that won the
// max (recorded in l.indexes).
void backward_maxpool_layer(const maxpool_layer l, network_state state)
{
    int i;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.out_c;
    #pragma omp parallel for
    for (i = 0; i < h*w*c*l.batch; ++i) {
        int index = l.indexes[i];
        // when pooling windows overlap (stride < size), several outputs can
        // share the same argmax input, so the += must be atomic
        #pragma omp atomic
        state.delta[index] += l.delta[i];
    }
}
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 
} } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 
24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } 
} }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } 
}, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } 
}, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 
13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 
8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 
63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 
47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } },
  { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } },
  { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } }
};

/* Per-channel lookup tables in R,G,B order: 5-bit red, 6-bit green,
   5-bit blue (RGB565). */
static const DDSSingleColourLookup*
  DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 };

/*
  Macros
*/
/* Extract the raw 5/6/5-bit channel fields from a 16-bit RGB565 value. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)

/* Expand the 5/6-bit channels to 8 bits by replicating the high bits. */
#define C565_red(x)   ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ( (C565_b(x) << 3 | C565_b(x) >> 2))

/* Halve x but never go below 1 (mipmap dimension stepping). */
#define DIV2(x) ((x) > 1 ? \
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *, DDSVector4 *, unsigned char *, size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *), WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,const MagickBooleanType, ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), 
WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);

/*
  Component-wise vector helpers used by the DXT compression code below.
*/

/* destination = left + right (all four components). */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x = left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp every component of *value into [0,1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp every component of *value into [0,1] (3-component variant). */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy x/y/z of a 4-vector into a 3-vector (w is dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a*b (component-wise negative multiply-subtract). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* destination = left * right (component-wise). */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* destination = left * right (component-wise, 3-component variant). */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* destination = a*b + c (component-wise multiply-add). */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* destination = a*b + c (component-wise, 3-component variant). */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* destination = 1/value (component-wise reciprocal; no zero guard). */
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* destination = left - right (all four components). */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* destination = left - right (3-component variant). */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Truncate each component toward zero. */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* Truncate each component toward zero (3-component variant). */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z); }

/*
  Expand the two 16-bit RGB565 endpoint colors of a DXT block into the four
  palette entries c->r/g/b/a[0..3].  When ignoreAlpha is set or c0 > c1,
  entries 2 and 3 are the 2/3 and 1/3 interpolants (4-color mode);
  otherwise entry 2 is the midpoint and entry 3 is black with a[3] = 255
  (3-color mode; the a[] value flags the special index for the caller --
  see the DXT1 decoder).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  Quantize 16 alpha samples against an 8-entry code palette built from
  [min,max]: codes 0/1 are the endpoints, codes 2..steps are linear
  interpolants, and codes 6/7 are fixed 0 and 255 (which the interpolation
  loop overwrites in 7-step mode, matching the DXT5 8-alpha layout).
  alphas[i] == -1 marks a sample to skip (index 0 is emitted).  Writes the
  chosen palette index per sample into indices[] and returns the summed
  squared error.  Note: the unsigned subtraction below may wrap, but
  squaring restores the correct d*d modulo 2^n, so the comparison is safe.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  register ssize_t
    i;

  size_t
    error, index, j, least, value;

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

/*
  Cluster-fit DXT endpoint search (in the style of squish's cluster fit --
  TODO confirm attribution): exhaustively partitions the points, ordered
  along the principal axis, into four clusters and keeps the partition
  minimizing the metric-weighted least-squares error.  (Signature here;
  body continues below.)
*/
static void CompressClusterFit(const size_t count, const DDSVector4 *points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3* end, unsigned char *indices) {
/* CompressClusterFit body: working vectors plus the best solution found. */
  DDSVector3
    axis;

  DDSVector4
    grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two,
    twonineths, twothirds_twothirds2, xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex;

  ssize_t
    i;

  unsigned char
    *o, order[128], unordered[16];

  /* Constant weights for the 1/3 and 2/3 interpolated palette entries. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* RGB565 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  /* Re-order along the current best axis until the ordering stops
     changing (at most 8 iterations). */
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0, part1, part2;

      size_t
        ii, j, k, kmin;

      /* part0/part1/part2 accumulate the weighted sums of the first three
         clusters; the fourth cluster is derived from the total below. */
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        for (k=kmin;;)
        {
          DDSVector4
            a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum,
            betax_sum, e1, e2, factor, part3;

          float
            error;

          /* Solve the 2x2 least-squares system for endpoints a and b of
             this particular (i,j,k) partition. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both endpoints onto the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);

          /* Metric-weighted squared error of this partition. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-test inside the critical section: another thread may
                   have lowered bestError since the unlocked check. */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    VectorSubtract3(*end,*start,&axis);

    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
      break;
  }

  /* Translate the winning partition boundaries into per-point cluster
     codes 0..3 (final index layout handled by RemapIndices). */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for
(i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}

/*
  Range-fit endpoint search: take the two extreme points along the
  principal axis as endpoints, snap them to the RGB565 grid, build the
  four-entry palette, and assign each point to its nearest
  (metric-weighted) palette entry.
*/
static void CompressRangeFit(const size_t count, const DDSVector4* points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  float
    d, bestDist, max, min, val;

  DDSVector3
    codes[4], grid, gridrcp, half, dist;

  register ssize_t
    i;

  size_t
    bestj, j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      /* Project every point onto the principal axis; keep the extremes. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints onto the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* Four-entry DXT palette: endpoints plus 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}

/*
  Derive optimal single-colour endpoints from the per-channel lookup
  tables (signature continues on the next source line).
*/
static void ComputeEndPoints(const
DDSSingleColourLookup *lookup[], const unsigned char *color,
  DDSVector3 *start, DDSVector3 *end, unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c, maxError = SIZE_MAX;

  /* Try both candidate source blocks per table entry and keep the
     lower-error pair; maxError starts at SIZE_MAX so the first candidate
     is always accepted. */
  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    /* Convert the table's 5/6/5-bit endpoints back to [0,1] floats. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}

/*
  Principal eigenvector of a symmetric 3x3 covariance matrix (packed as
  six floats, upper triangle) via eight rounds of power iteration,
  normalizing by the largest component each round.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0, row1, row2, v;

  register ssize_t
    i;

  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest component to keep the iteration stable. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}

/*
  Weighted covariance (six floats, upper triangle) of a point cloud whose
  per-point weights are carried in the w component.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by 
the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { const char *option; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; Image *image; MagickBooleanType status, cubemap, volume, read_mipmaps; PixelTrait alpha_trait; size_t n, num_images; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse; image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. */ if (ReadDDSInfo(image, &dds_info) != MagickTrue) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { alpha_trait = BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ 
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; if ((num_images == 0) || (num_images > GetBlobSize(image))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse) ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit"); option=GetImageOption(image_info,"dds:skip-mipmaps"); if (IsStringFalse(option) != MagickFalse) read_mipmaps=MagickTrue; for (n = 0; n < num_images; n++) { if (n != 0) { /* Start a new image */ if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->alpha_trait=alpha_trait; image->compression=compression; image->columns=dds_info.width; image->rows=dds_info.height; image->storage_class=DirectClass; image->endian=LSBEndian; image->depth=8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); (void) SetImageBackgroundColor(image,exception); 
status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception); if (status == MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y, DDSColors colors,size_t bits,Quantum *q) { register ssize_t i; ssize_t j; unsigned char code; for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) 
image->rows) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q); if ((colors.a[code] != 0) && (image->alpha_trait == UndefinedPixelTrait)) return(MagickFalse); q+=GetPixelChannels(image); } } } return(MagickTrue); } static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception) { MagickBooleanType status; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } status=MagickTrue; if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(MagickFalse); image=SyncNextImageInList(image); status=SetImageExtent(image,w,h,exception); if (status == MagickFalse) break; status=decoder(image,dds_info,exception); if (status == MagickFalse) break; if ((w == 1) && (h == 1)) break; w=DIV2(w); h=DIV2(h); } } return(status); } static MagickBooleanType ReadDXT1Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t x; size_t bits; ssize_t y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ 
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 8 bytes of data from the image */ c0=ReadBlobLSBShort(image); c1=ReadBlobLSBShort(image); bits=ReadBlobLSBLong(image); CalculateColors(c0,c1,&colors,MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse) { /* Correct alpha */ SetImageAlpha(image,QuantumRange,exception); q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q != (Quantum *) NULL) SetDXT1Pixels(image,x,y,colors,bits,q); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ 
c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; MagickSizeType alpha_bits; register Quantum *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, 
image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType 
ReadUncompressedRGBPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t x, y; unsigned short color; for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(image,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); if (dds_info->pixelformat.rgb_bitcount == 32) (void) ReadBlobByte(image); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType,exception); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) 
return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBAPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleAlphaType,exception); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(image,(color & (1 << 15)) ? 
QuantumRange : 0,q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else if (alphaBits == 2) { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (color >> 8)),q); SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q); } else { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q); } } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R 
e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { /* 
Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)w*h*pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported 
formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) 
NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) 
WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) // bitcount / masks (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) // ddscaps2 + reserved region (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register ssize_t x; ssize_t i, y, bx, by; register const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, 
points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); 
(void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; register ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if (fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char 
color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } }
/* ==================== matrix.c ==================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M AAA TTTTT RRRR IIIII X X % % MM MM A A T R R I X X % % M M M AAAAA T RRRR I X % % M M A A T R R I X X % % M M A A T R R IIIII X X % % % % % % MagickCore Matrix Methods % % % % Software Design % % Cristy % % August 2007 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image-private.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/thread-private.h" #include "magick/utility.h" /* Typedef declaration. 
*/
struct _MatrixInfo
{
  CacheType
    type;                  /* MemoryCache, MapCache, or DiskCache */

  size_t
    columns,               /* matrix width in elements */
    rows,                  /* matrix height in elements */
    stride;                /* bytes per element */

  MagickSizeType
    length;                /* total backing-store size: columns*rows*stride */

  MagickBooleanType
    mapped,                /* elements came from MapBlob(), not malloc */
    synchronize;           /* honor MAGICK_SYNCHRONIZE for disk caches */

  char
    path[MaxTextExtent];   /* scratch-file path when disk-backed */

  int
    file;                  /* scratch-file descriptor, -1 when unused */

  void
    *elements;             /* in-memory or mapped element storage */

  SemaphoreInfo
    *semaphore;            /* serializes seek+read/write when pread/pwrite
                              are unavailable */

  size_t
    signature;             /* structure validity marker */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e M a t r i x I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireMatrixInfo() allocates the MatrixInfo structure.
%
%  The format of the AcquireMatrixInfo method is:
%
%      MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
%        const size_t stride,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: the matrix columns.
%
%    o rows: the matrix rows.
%
%    o stride: the matrix stride.
%
%    o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/* SIGBUS handler: writing through a mapping past the end of the backing
   file raises SIGBUS; convert it into a fatal cache exception. */
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif

/* Write `length` bytes from `buffer` to the matrix scratch file starting at
   `offset`.  Uses pwrite() when available; otherwise serializes an
   lseek()+write() pair under the matrix semaphore.  Returns the number of
   bytes written (possibly short on a hard error), or -1 on a seek failure. */
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: return the short byte count */
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
static MagickBooleanType SetMatrixExtent( MatrixInfo *magick_restrict matrix_info,MagickSizeType length) { MagickOffsetType count, extent, offset; if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) return(MagickTrue); extent=(MagickOffsetType) length-1; count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) ""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (matrix_info->synchronize != MagickFalse) (void) posix_fallocate(matrix_info->file,offset+1,extent-offset); #endif #if defined(SIGBUS) (void) signal(SIGBUS,MatrixSignalHandler); #endif return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue); } MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns, const size_t rows,const size_t stride,ExceptionInfo *exception) { char *synchronize; MagickBooleanType status; MatrixInfo *matrix_info; matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info)); if (matrix_info == (MatrixInfo *) NULL) return((MatrixInfo *) NULL); (void) memset(matrix_info,0,sizeof(*matrix_info)); matrix_info->signature=MagickCoreSignature; matrix_info->columns=columns; matrix_info->rows=rows; matrix_info->stride=stride; matrix_info->semaphore=AllocateSemaphoreInfo(); synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { matrix_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } matrix_info->length=(MagickSizeType) columns*rows*stride; if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=MemoryCache; status=AcquireMagickResource(AreaResource,matrix_info->length); if ((status != MagickFalse) && (matrix_info->length == 
(MagickSizeType) ((size_t) matrix_info->length))) { status=AcquireMagickResource(MemoryResource,matrix_info->length); if (status != MagickFalse) { matrix_info->mapped=MagickFalse; matrix_info->elements=AcquireMagickMemory((size_t) matrix_info->length); if (matrix_info->elements == NULL) { matrix_info->mapped=MagickTrue; matrix_info->elements=MapBlob(-1,IOMode,0,(size_t) matrix_info->length); } if (matrix_info->elements == (unsigned short *) NULL) RelinquishMagickResource(MemoryResource,matrix_info->length); } } matrix_info->file=(-1); if (matrix_info->elements == (unsigned short *) NULL) { status=AcquireMagickResource(DiskResource,matrix_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=DiskCache; matrix_info->file=AcquireUniqueFileResource(matrix_info->path); if (matrix_info->file == -1) return(DestroyMatrixInfo(matrix_info)); status=AcquireMagickResource(MapResource,matrix_info->length); if (status != MagickFalse) { status=SetMatrixExtent(matrix_info,matrix_info->length); if (status != MagickFalse) matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0, (size_t) matrix_info->length); if (matrix_info->elements != NULL) matrix_info->type=MapCache; else RelinquishMagickResource(MapResource,matrix_info->length); } } return(matrix_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a g i c k M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMagickMatrix() allocates and returns a matrix in the form of an % array of pointers to an array of doubles, with all values pre-set to zero. % % This used to generate the two dimensional matrix, and vectors required % for the GaussJordanElimination() method below, solving some system of % simultanious equations. 
%
%  The format of the AcquireMagickMatrix method is:
%
%      double **AcquireMagickMatrix(const size_t number_rows,
%        const size_t size)
%
%  A description of each parameter follows:
%
%    o number_rows: the number pointers for the array of pointers
%      (first dimension).
%
%    o size: the size of the array of doubles each pointer points to
%      (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **matrix;

  register ssize_t
    row,
    column;

  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (row=0; row < (ssize_t) number_rows; row++)
  {
    matrix[row]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[row]));
    if (matrix[row] == (double *) NULL)
      {
        /*
          Allocation failed part-way: release every row acquired so far,
          then the row-pointer array itself.
        */
        while (--row >= 0)
          matrix[row]=(double *) RelinquishMagickMemory(matrix[row]);
        matrix=(double **) RelinquishMagickMemory(matrix);
        return((double **) NULL);
      }
    for (column=0; column < (ssize_t) size; column++)
      matrix[row][column]=0.0;
  }
  return(matrix);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y M a t r i x I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
%  with the matrix.
%
%  The format of the DestroyImage method is:
%
%      MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          /* Anonymous-map storage: unmap rather than free. */
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
      /* fallthrough: a map cache is backed by a disk file, so control
         intentionally continues into the DiskCache cleanup below. */
    }
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  DestroySemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G a u s s J o r d a n E l i m i n a t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussJordanElimination() returns a matrix in reduced row echelon form,
%  while simultaneously reducing and thus solving the augmented results
%  matrix.
%
%  See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
%  The format of the GaussJordanElimination method is:
%
%      MagickBooleanType GaussJordanElimination(double **matrix,
%        double **vectors,const size_t rank,const size_t number_vectors)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
%    o vectors: the additional matrix augmenting the matrix for row reduction.
%      Producing an 'array of column vectors'.
%
%    o rank: The size of the matrix (both rows and columns).  Also represents
%      the number of terms that need to be solved.
%
%    o number_vectors: Number of vector columns, augmenting the above matrix.
%      Usually 1, but can be more for more complex equation solving.
%
%  Note that the 'matrix' is given as an 'array of row pointers' of rank size.
%  That is, values can be assigned as matrix[row][column], where 'row' is
%  typically the equation and 'column' is the term of the equation.
%  That is, the matrix is in the form of a 'row first array'.
%
%  However 'vectors' is an 'array of column pointers' which can have any
%  number of columns, with each column array the same 'rank' size as 'matrix'.
%
%  This allows for simpler handling of the results, especially when only one
%  column 'vector' is all that is required to produce the desired solution.
%
%  For example, the 'vectors' can consist of a pointer to a simple array of
%  doubles, when only one set of simultaneous equations is to be solved from
%  the given set of coefficient-weighted terms.
%
%     double **matrix = AcquireMagickMatrix(8UL,8UL);
%     double coefficients[8];
%     ...
%     GaussJordanElimination(matrix, &coefficients, 8UL, 1UL);
%
%  However by specifying more 'columns' (as an 'array of vector columns') you
%  can use this function to solve a set of 'separable' equations.
%
%  For example a distortion function where u = U(x,y), v = V(x,y),
%  and the functions U() and V() have separate coefficients but are being
%  generated from a common x,y -> u,v data set.
%
%  Another example is generation of a color gradient from a set of colors at
%  specific coordinates, such as a list x,y -> r,g,b,a.
%
%  You can also use the 'vectors' to generate an inverse of the given
%  'matrix', though as a 'column first array' rather than a 'row first array'.
For % details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickExport MagickBooleanType GaussJordanElimination(double **matrix, double **vectors,const size_t rank,const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; register ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns)); rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows)); pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots=(ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns=(ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows=(ssize_t *) RelinquishMagickMemory(rows); return(MagickFalse); } (void) memset(columns,0,rank*sizeof(*columns)); (void) memset(rows,0,rank*sizeof(*rows)); (void) memset(pivots,0,rank*sizeof(*pivots)); column=0; row=0; for (i=0; i < (ssize_t) rank; i++) { max=0.0; for (j=0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k=0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return(MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max=fabs(matrix[j][k]); row=j; column=k; } } pivots[column]++; if (row != column) { for (k=0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k],matrix[column][k]); for (k=0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row],vectors[k][column]); } rows[i]=row; columns[i]=column; if (matrix[column][column] == 0.0) return(MagickFalse); /* sigularity */ scale=PerceptibleReciprocal(matrix[column][column]); matrix[column][column]=1.0; for (j=0; j < (ssize_t) rank; j++) matrix[column][j]*=scale; for (j=0; j < (ssize_t) number_vectors; j++) vectors[j][column]*=scale; for (j=0; j < (ssize_t) rank; j++) if (j != column) 
{ scale=matrix[j][column]; matrix[j][column]=0.0; for (k=0; k < (ssize_t) rank; k++) matrix[j][k]-=scale*matrix[column][k]; for (k=0; k < (ssize_t) number_vectors; k++) vectors[k][j]-=scale*vectors[k][column]; } } for (j=(ssize_t) rank-1; j >= 0; j--) if (columns[j] != rows[j]) for (i=0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]); pivots=(ssize_t *) RelinquishMagickMemory(pivots); rows=(ssize_t *) RelinquishMagickMemory(rows); columns=(ssize_t *) RelinquishMagickMemory(columns); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x C o l u m n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixColumns() returns the number of columns in the matrix. % % The format of the GetMatrixColumns method is: % % size_t GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->columns); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixElement() returns the specifed element in the matrix. % % The format of the GetMatrixElement method is: % % MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,void *value) % % A description of each parameter follows: % % o matrix_info: the matrix columns. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: return the matrix element in this buffer. 
%
*/
/* Clamp an x offset into [0, columns-1] (replicate-edge addressing). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp a y offset into [0, rows-1] (replicate-edge addressing). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/* Read `length` bytes of the matrix scratch file at `offset` into `buffer`.
   Mirrors WriteMatrixElements(): pread() when available, otherwise a
   semaphore-serialized lseek()+read().  Returns the number of bytes read
   (possibly short on a hard error), or -1 on a seek failure. */
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: return the short byte count */
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}

MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* Out-of-range offsets are clamped to the nearest edge element. */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* Memory or map cache: copy the element straight out of storage. */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t M a t r i x R o w s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMatrixRows() returns the number of rows in the matrix.
%
%  The format of the GetMatrixRows method is:
%
%      size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L e a s t S q u a r e s A d d T e r m s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LeastSquaresAddTerms() adds one set of terms and associate results to the
%  given matrix and vectors for solving using least-squares function fitting.
%
%  The format of the AcquireMagickMatrix method is:
%
%      void LeastSquaresAddTerms(double **matrix,double **vectors,
%        const double *terms,const double *results,const size_t rank,
%        const size_t number_vectors);
%
%  A description of each parameter follows:
%
%    o matrix: the square matrix to add given terms/results to.
%
%    o vectors: the result vectors to add terms/results to.
%
%    o terms: the pre-calculated terms (without the unknown coefficient
%      weights) that forms the equation being added.
%
%    o results: the result(s) that should be generated from the given terms
%      weighted by the yet-to-be-solved coefficients.
%
%    o rank: the rank or size of the dimensions of the square matrix.
%      Also the length of vectors, and number of terms being added.
%
%    o number_vectors: Number of result vectors, and number or results being
%      added.  Also represents the number of separable systems of equations
%      that is being solved.
%
%  Example of use...
% % 2 dimensional Affine Equations (which are separable) % c0*x + c2*y + c4*1 => u % c1*x + c3*y + c5*1 => v % % double **matrix = AcquireMagickMatrix(3UL,3UL); % double **vectors = AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... % for each given x,y -> u,v % terms[0] = x; % terms[1] = y; % terms[2] = 1; % results[0] = u; % results[1] = v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % c5 = vectors[1][2]; % } % else % printf("Matrix unsolvable\n); % RelinquishMagickMatrix(matrix,3UL); % RelinquishMagickMatrix(vectors,2UL); % */ MagickExport void LeastSquaresAddTerms(double **matrix,double **vectors, const double *terms,const double *results,const size_t rank, const size_t number_vectors) { register ssize_t i, j; for (j=0; j < (ssize_t) rank; j++) { for (i=0; i < (ssize_t) rank; i++) matrix[i][j]+=terms[i]*terms[j]; for (i=0; i < (ssize_t) number_vectors; i++) vectors[i][j]+=results[i]*terms[j]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t r i x T o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatrixToImage() returns a matrix as an image. The matrix elements must be % of type double otherwise nonsense is returned. % % The format of the MatrixToImage method is: % % Image *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor,
    value;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Elements must be at least double-sized; smaller strides would make the
     double reads below nonsense. */
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&value);
  min_value=value;
  max_value=value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /* Map [min,max] onto the quantum range; a degenerate range either scales
     to 0 (all-zero matrix) or maps the constant value to QuantumRange. */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    register PixelPacket
      *q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      value=scale_factor*(value-min_value);
      /* Gray image: replicate the clamped intensity into all channels. */
      q->red=ClampToQuantum(value);
      q->green=q->red;
      q->blue=q->red;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N u l l M a t r i x                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NullMatrix() sets all elements of the matrix to zero.
%
%  The format of the NullMatrix method is:
%
%      MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
% */
/*
  NullMatrix() zeroes every element of the matrix.  For a memory-resident
  matrix this is a single memset over the element buffer.  For a DiskCache
  matrix the backing file is rewound and exactly matrix_info->length bytes
  (rows x columns x stride) of zeros are written.

  Fix: the disk path previously ran its inner loop over the *entire*
  matrix_info->length for every row, writing rows x length bytes and
  inflating the on-disk cache (and the I/O cost) by a factor of rows.
  The inner loop now covers one row's worth of bytes (columns x stride),
  so the total written equals length.

  Returns MagickTrue on success, MagickFalse if any write falls short.
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  register ssize_t
    x;

  ssize_t
    count,
    row_bytes,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  /* bytes of zeros required per matrix row */
  row_bytes=(ssize_t) (matrix_info->columns*matrix_info->stride);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < row_bytes; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    /* a short write aborts the remaining rows */
    if (x < row_bytes)
      break;
  }
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e l i n q u i s h M a g i c k M a t r i x                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RelinquishMagickMatrix() frees the previously acquired matrix (array of
%  pointers to arrays of doubles).
%
%  The format of the RelinquishMagickMatrix method is:
%
%      double **RelinquishMagickMatrix(double **matrix,
%        const size_t number_rows)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to relinquish
%
%    o number_rows: the first dimension of the acquired matrix (number of
%      pointers)
%
*/
/*
  Release a matrix allocated as an array of row pointers: each row buffer
  is freed first, then the pointer array itself.  Always returns NULL so
  callers can write matrix=RelinquishMagickMatrix(matrix,rows).
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  size_t
    r;

  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < number_rows; r++)
    matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
  return((double **) RelinquishMagickMemory(matrix));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t M a t r i x E l e m e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMatrixElement() sets the specifed element in the matrix.
%
%  The format of the SetMatrixElement method is:
%
%      MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
%        const ssize_t x,const ssize_t y,void *value)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix columns.
%
%    o x: the matrix x-offset.
%
%    o y: the matrix y-offset.
%
%    o value: set the matrix element to this value.
% */
/*
  SetMatrixElement() stores one element at (x,y).  The flat element index
  is i = y*columns + x; the byte offset is i*stride.  Out-of-range
  coordinates (negative index or byte offset past the end of the element
  store) return MagickFalse.  Memory-resident matrices are updated with a
  memcpy; DiskCache matrices are written through WriteMatrixElements().
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* flat element index; bounds-checked in bytes against ->length */
  i=(MagickOffsetType) y*matrix_info->columns+x;
  if ((i < 0) ||
      ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      (void) memcpy((unsigned char *) matrix_info->elements+i*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  /* disk-backed matrix: write exactly one element's worth of bytes */
  count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
10-omp.c
/******************************************************************************
* FILE: omp_hello.c
* DESCRIPTION:
*   OpenMP Example - Hello World - C/C++ Version
*   In this simple example, the master thread forks a parallel region.
*   All threads in the team obtain their unique thread number and print it.
*   The master thread only prints the total number of threads.  Two OpenMP
*   library routines are used to obtain the number of threads and each
*   thread's number.
* AUTHOR: Blaise Barney  5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
  int thread_total, thread_id;

  /* Fork a team of threads; each thread gets private copies of the
     two counters so they can report independently. */
  #pragma omp parallel private(thread_total, thread_id)
  {
    /* Every thread reports its own number. */
    thread_id = omp_get_thread_num();
    printf("Hello World from thread = %d\n", thread_id);

    /* The master thread (id 0) additionally reports the team size. */
    if (thread_id == 0)
    {
      thread_total = omp_get_num_threads();
      printf("Number of threads = %d\n", thread_total);
    }
  }  /* All threads join master thread and disband */

  return 0;
}

// RUN: clang -fopenmp -c -g -emit-llvm %s -o %t.1.bc
// RUN: opt -instnamer %t.1.bc -o %t.bc
// RUN: llvm-epp %t.bc -o %t.profile
// RUN: clang -fopenmp -v %t.epp.bc -o %t-exec -lepp-rt -lpthread 2> %t.compile
// RUN: OMP_NUM_THREADS=10 %t-exec > %t.log
// RUN: llvm-epp -p=%t.profile %t.bc 2> %t.decode
// RUN: diff -aub %t.profile %s.txt
EwaldRef.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Jaron T. Krogel, krogeljt@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jaron T. Krogel, krogeljt@ornl.gov, Oak Ridge National Laboratory
//////////////////////////////////////////////////////////////////////////////////////

/**@file EwaldRef.h
 *
 * @brief Computes Ewald sums of the potential energy to a given
 *    tolerance for arbitrary collections of charges.
 *
 * The implementation follows formulas 6 and 7 from:
 *
 * N. D. Drummond et al., Physical Review B 78 125106 (2008)
 *
 *  DOI: https://doi.org/10.1103/PhysRevB.78.125106
 */

#ifndef EWALD_REF_H
#define EWALD_REF_H

#include <cmath>
#include "Configuration.h"
#include "OhmmsPETE/TinyVector.h"
#include "OhmmsPETE/Tensor.h"
#include "Utilities/TimerManager.h"

// NOTE(review): std::vector is used below but <vector> is not included
// directly -- presumably pulled in via Configuration.h; confirm.

namespace qmcplusplus
{
namespace ewaldref
{
/// Reference Ewald implemented for 3D only
enum
{
  DIM = 3
};

/// Type for integers
using int_t = int;
/// Type for floating point numbers
using real_t = double;
/// Type for integer vectors of length DIM
using IntVec = TinyVector<int_t, DIM>;
/// Type for floating point vectors of length DIM
using RealVec = TinyVector<real_t, DIM>;
/// Type for floating point matrices of shape DIM,DIM
using RealMat = Tensor<real_t, DIM>;
/// Type for lists of particle positions
using PosArray = std::vector<RealVec>;
/// Type for lists of particle charges
using ChargeArray = std::vector<real_t>;


/// Functor for term within the real-space sum in Drummond 2008 formula 7
class RspaceMadelungTerm
{
private:
  /// The real-space cell axes
  const RealMat a;
  /// The constant \kappa in Drummond 2008 formula 7
  const real_t rconst;

public:
  RspaceMadelungTerm(const RealMat& a_in, real_t rconst_in) : a(a_in), rconst(rconst_in) {}

  /// Evaluate erfc(kappa*R)/R at the lattice point with integer index i.
  real_t operator()(const IntVec& i) const
  {
    RealVec Rv  = dot(i, a);
    real_t R    = std::sqrt(dot(Rv, Rv));
    real_t rm   = std::erfc(rconst * R) / R;
    return rm;
  }
};


/// Functor for term within the k-space sum in Drummond 2008 formula 7
class KspaceMadelungTerm
{
private:
  /// The k-space cell axes
  const RealMat b;
  /// The constant 1/(4\kappa^2) in Drummond 2008 formula 7
  const real_t kconst;
  /// The constant 4\pi/\Omega in Drummond 2008 formula 7
  const real_t kfactor;

public:
  KspaceMadelungTerm(const RealMat& b_in, real_t kconst_in, real_t kfactor_in)
      : b(b_in), kconst(kconst_in), kfactor(kfactor_in)
  {}

  /// Evaluate kfactor*exp(kconst*K^2)/K^2 at the k-point with integer index i.
  /// (kconst is negative, so this decays with |K|.)
  real_t operator()(const IntVec& i) const
  {
    RealVec Kv = dot(i, b);
    real_t K2  = dot(Kv, Kv);
    real_t km  = kfactor * std::exp(kconst * K2) / K2;
    return km;
  }
};


/// Functor for term within the real-space sum in Drummond 2008 formula 6
class RspaceEwaldTerm
{
private:
  /// The inter-particle separation vector
  const RealVec r;
  /// The real-space cell axes
  const RealMat a;
  /// The constant 1/(\sqrt{2}\kappa) in Drummond 2008 formula 6
  const real_t rconst;

public:
  RspaceEwaldTerm(const RealVec& r_in, const RealMat& a_in, real_t rconst_in) : r(r_in), a(a_in), rconst(rconst_in) {}

  /// Evaluate erfc(rconst*|R-r|)/|R-r| for lattice image i of the pair
  /// separation r.
  real_t operator()(const IntVec& i) const
  {
    RealVec Rv = dot(i, a);
    for (int_t d : {0, 1, 2})
      Rv[d] -= r[d];
    real_t R  = std::sqrt(dot(Rv, Rv));
    real_t rm = std::erfc(rconst * R) / R;
    return rm;
  }
};


/// Functor for term within the k-space sum in Drummond 2008 formula 6
class KspaceEwaldTerm
{
private:
  /// The inter-particle separation vector
  const RealVec r;
  /// The k-space cell axes
  const RealMat b;
  /// The constant -\kappa^2/2 in Drummond 2008 formula 6
  const real_t kconst;
  /// The constant 4\pi/\Omega in Drummond 2008 formula 6
  const real_t kfactor;

public:
  KspaceEwaldTerm(const RealVec& r_in, const RealMat& b_in, real_t kconst_in, real_t kfactor_in)
      : r(r_in), b(b_in), kconst(kconst_in), kfactor(kfactor_in)
  {}

  /// Evaluate kfactor*exp(kconst*K^2)*cos(K.r)/K^2 at the k-point with
  /// integer index i.
  real_t operator()(const IntVec& i) const
  {
    RealVec Kv = dot(i, b);
    real_t K2  = dot(Kv, Kv);
    real_t Kr  = dot(Kv, r);
    real_t km  = kfactor * std::exp(kconst * K2) * std::cos(Kr) / K2;
    return km;
  }
};


/** Perform a sum over successively larger cubic integer grids
 *  in DIM dimensional space for arbitrary functors.
 *
 *  @param function: A functor accepting a point in the grid and
 *    returning the real-valued contribution to the sum from that
 *    point.
 *
 *  @param zero: Include the origin in the sum (or not).
 *
 *  @param tol: Tolerance for the sum.  Summation ceases when the
 *    contribution to the sum from the outermost cubic shell of
 *    points is less than tol.
 */
template<typename T>
real_t gridSum(T& function, bool zero = true, real_t tol = 1e-11)
{
  real_t dv  = std::numeric_limits<real_t>::max();
  real_t dva = std::numeric_limits<real_t>::max();
  real_t v   = 0.0;
  // Current extents of the summed grid along x, y, z.
  int_t im   = 0;
  int_t jm   = 0;
  int_t km   = 0;
  IntVec iv;
  // Add the value at the origin, if requested.
  if (zero)
  {
    iv = 0;
    v += function(iv);
  }
  // Sum over cubic surface shells until the tolerance is reached.
  // Each iteration grows the cube by one layer in every direction and
  // accumulates only the new surface points (dv), tracking the sum of
  // absolute contributions (dva) for the convergence test.
  while (std::abs(dva) > tol)
  {
    dva = 0.0;
    dv  = 0.0; // Surface shell contribution.
    // Sum over new surface planes perpendicular to the x direction.
    im += 1;
    for (int_t i : {-im, im})
      for (int_t j = -jm; j < jm + 1; ++j)
        for (int_t k = -km; k < km + 1; ++k)
        {
          iv[0]    = i;
          iv[1]    = j;
          iv[2]    = k;
          real_t f = function(iv);
          dv += f;
          dva += std::abs(f);
        }
    // Sum over new surface planes perpendicular to the y direction.
    jm += 1;
    for (int_t j : {-jm, jm})
      for (int_t k = -km; k < km + 1; ++k)
        for (int_t i = -im; i < im + 1; ++i)
        {
          iv[0]    = i;
          iv[1]    = j;
          iv[2]    = k;
          real_t f = function(iv);
          dv += f;
          dva += std::abs(f);
        }
    // Sum over new surface planes perpendicular to the z direction.
    km += 1;
    for (int_t k : {-km, km})
      for (int_t i = -im; i < im + 1; ++i)
        for (int_t j = -jm; j < jm + 1; ++j)
        {
          iv[0]    = i;
          iv[1]    = j;
          iv[2]    = k;
          real_t f = function(iv);
          dv += f;
          dva += std::abs(f);
        }
    v += dv;
  }
  return v;
}


/** Find the optimal kappa for Madelung sums
 *
 *  The optimal kappa balances the number of points within a given
 *  isosurface of the Gaussians (or limiting Gaussians from erfc)
 *  in the real-space and k-space Madelung terms.  The balancing
 *  condition is made under isotropic assumptions, as reflected
 *  by the use of a sphere equal in volume to the simulation cell
 *  to determine the radius.
 *
 *  @param volume: Volume of the real space cell.
 */
// NOTE(review): non-template free functions defined in a header without
// 'inline' (here through ewaldEnergy below) risk multiple-definition
// errors if this header is included from more than one translation unit
// -- confirm intended usage.
real_t getKappaMadelung(real_t volume)
{
  real_t radius = std::pow(3. * volume / (4 * M_PI), 1. / 3);
  return std::sqrt(M_PI) / radius;
}


/** Compute the Madelung constant to a given tolerance
 *
 *  Corresponds to the entirety of Drummond 2008 formula 7.
 *
 *  @param a: Real-space cell axes.
 *
 *  @param tol: Tolerance for the Madelung constant in Ha.
 */
real_t madelungSum(const RealMat& a, real_t tol = 1e-10)
{
  // Real-space cell volume
  real_t volume = std::abs(det(a));
  // k-space cell axes
  RealMat b = 2 * M_PI * transpose(inverse(a));
  // k-space cutoff (kappa)
  real_t kconv = getKappaMadelung(volume);

  // Set constants for real-/k-space Madelung functors
  real_t rconst  = kconv;
  real_t kconst  = -1. / (4 * std::pow(kconv, 2));
  real_t kfactor = 4 * M_PI / volume;

  // Create real-/k-space fuctors for terms within the sums in formula 7
  RspaceMadelungTerm rfunc(a, rconst);
  KspaceMadelungTerm kfunc(b, kconst, kfactor);

  // Compute the constant term
  real_t cval = -M_PI / (std::pow(kconv, 2) * volume) - 2 * kconv / std::sqrt(M_PI);
  // Compute the real-space sum (excludes zero)
  real_t rval = gridSum(rfunc, false, tol);
  // Compute the k-space sum (excludes zero)
  real_t kval = gridSum(kfunc, false, tol);

  // Sum all contributions to get the Madelung constant
  real_t ms = cval + rval + kval;

  return ms;
}


/** Find the optimal kappa for Ewald pair sums
 *
 *  The optimal kappa balances the number of points within a given
 *  isosurface of the Gaussians (or limiting Gaussians from erfc)
 *  in the real-space and k-space Ewald pair terms.  The balancing
 *  condition is made under isotropic assumptions, as reflected
 *  by the use of a sphere equal in volume to the simulation cell
 *  to determine the radius.
 *
 *  @param volume: Volume of the real space cell.
 */
real_t getKappaEwald(real_t volume)
{
  real_t radius = std::pow(3. * volume / (4 * M_PI), 1. / 3);
  return radius / std::sqrt(2 * M_PI);
}


/** Compute the Ewald interaction of a particle pair to a given tolerance
 *
 *  Corresponds to the entirety of Drummond 2008 formula 6.
 *
 *  @param r: Inter-particle separation vector.
 *
 *  @param a: Real-space cell axes.
 *
 *  @param tol: Tolerance for the Ewald pair interaction in Ha.
 */
real_t ewaldSum(const RealVec& r, const RealMat& a, real_t tol = 1e-10)
{
  // Real-space cell volume
  real_t volume = std::abs(det(a));
  // k-space cell axes
  RealMat b = 2 * M_PI * transpose(inverse(a));
  // k-space cutoff (kappa)
  real_t kconv = getKappaEwald(volume);

  // Set constants for real-/k-space Ewald pair functors
  real_t rconst  = 1. / (std::sqrt(2.) * kconv);
  real_t kconst  = -std::pow(kconv, 2) / 2;
  real_t kfactor = 4 * M_PI / volume;

  // Create real-/k-space fuctors for terms within the sums in formula 6
  RspaceEwaldTerm rfunc(r, a, rconst);
  KspaceEwaldTerm kfunc(r, b, kconst, kfactor);

  // Compute the constant term
  real_t cval = -2 * M_PI * std::pow(kconv, 2) / volume;
  // Compute the real-space sum (includes zero)
  real_t rval = gridSum(rfunc, true, tol);
  // Compute the k-space sum (excludes zero)
  real_t kval = gridSum(kfunc, false, tol);

  // Sum all contributions to get the Ewald pair interaction
  real_t es = cval + rval + kval;

  return es;
}


/** Compute the total Ewald potential energy for a collection of charges
 *
 *  Corresponds to the entirety of Drummond 2008 formula 5, but for
 *  arbitrary charges.
 *
 *  @param a: Real-space cell axes.
 *
 *  @param R: List of particle coordinates.
 *
 *  @param Q: List of particle charges.
 *
 *  @param tol: Tolerance for the total potential energy in Ha.
 */
real_t ewaldEnergy(const RealMat& a, const PosArray& R, const ChargeArray& Q, real_t tol = 1e-10)
{
  // Timer for EwaldRef
  ScopedTimer totalEwaldTimer(*timer_manager.createTimer("EwaldRef"));
  // Number of particles
  const size_t N = R.size();

  // Total Ewald potential energy
  real_t ve = 0.0;

  {
    // Sum Madelung contributions
    ScopedTimer totalMadelungTimer(*timer_manager.createTimer("MadelungSum"));
    // Maximum self-interaction charge product
    real_t qqmax = 0.0;
    for (size_t i = 0; i < N; ++i)
      qqmax = std::max(std::abs(Q[i] * Q[i]), qqmax);

    // Compute the Madelung term (Drummond 2008 formula 7)
    real_t vm = madelungSum(a, tol * 2. / qqmax);

    // Sum the Madelung self interaction for each particle
    for (size_t i = 0; i < N; ++i)
      ve += Q[i] * Q[i] * vm / 2;
  }

  {
    // Sum the interaction terms for all particle pairs
    ScopedTimer EwaldSumTimer(*timer_manager.createTimer("EwaldSum"));

    int_t Npairs = (N * (N - 1)) / 2;

    // Flattened lists of pair charge products and separations,
    // indexed n = 0..Npairs-1 over the strict lower triangle (j < i).
    std::vector<real_t> qq(Npairs);
    for (size_t i = 0, n = 0; i < N; ++i)
      for (size_t j = 0; j < i; ++j, ++n)
        qq[n] = Q[i] * Q[j];

    std::vector<RealVec> rr(Npairs);
    for (size_t i = 0, n = 0; i < N; ++i)
      for (size_t j = 0; j < i; ++j, ++n)
        rr[n] = R[i] - R[j];

    // NOTE(review): per-pair tolerance tol/qq[n] is negative when the
    // charge product is negative, and the loop compares size_t n against
    // the signed int_t Npairs -- confirm both are intended/benign here.
#pragma omp parallel for reduction(+ : ve)
    for (size_t n = 0; n < Npairs; ++n)
      ve += qq[n] * ewaldSum(rr[n], a, tol / qq[n]);
  }

  return ve;
}

} // namespace ewaldref
} // namespace qmcplusplus

#endif
blake2bp.c
/*
   BLAKE2 reference source code package - optimized C implementations

   Written in 2012 by Samuel Neves <sneves@dei.uc.pt>

   To the extent possible under law, the author(s) have dedicated all copyright
   and related and neighboring rights to this software to the public domain
   worldwide. This software is distributed without any warranty.

   You should have received a copy of the CC0 Public Domain Dedication along with
   this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/

/*
  blake2bp: 4-way parallel BLAKE2b.  The input is striped across
  PARALLELISM_DEGREE leaf hash states in BLAKE2B_BLOCKBYTES-sized blocks
  (leaf i consumes blocks i, i+4, i+8, ...); the leaf digests are then
  fed into a root state to form the final digest.  Leaves may run on
  separate OpenMP threads when _OPENMP is defined.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

#define PARALLELISM_DEGREE 4

/* Initialize one leaf state: fanout/depth describe the 2-level tree,
   node_offset selects which input stripe this leaf consumes. */
static int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = offset;
  P->node_depth = 0;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  /* leaves emit full-width inner digests, not the caller's outlen */
  S->outlen = P->inner_length;
  return 0;
}

/* Initialize the root state (node_depth 1); it hashes the leaf digests. */
static int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = 0;
  P->node_depth = 1;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  S->outlen = P->digest_length;
  return 0;
}

/* Initialize an unkeyed blake2bp state for a digest of outlen bytes.
   Returns 0 on success, -1 on invalid outlen. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;

  /* mark the final leaf and the root as last nodes of their level */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = outlen;
  return 0;
}

/* Keyed initialization: each leaf first absorbs one zero-padded
   key block, per the BLAKE2 keying convention. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = outlen;

  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb inlen bytes.  Full 4-block stripes are dealt out to the four
   leaves (in parallel under OpenMP); any trailing partial stripe is
   buffered in S->buf until more input or the final call arrives. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, size_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;

  /* top up and flush the carry buffer first, if it can be completed */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
  #pragma omp parallel shared(S)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    /* relies on the team having exactly PARALLELISM_DEGREE threads */
    size_t id__ = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    /* each worker consumes every 4th block, starting at its own offset */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* buffer whatever did not form a complete 4-block stripe */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}

/* Flush buffered input to the leaves, finalize each leaf, hash the four
   leaf digests into the root, and emit the final digest.  Fails if the
   requested outlen does not match the one given at init. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  if(S->outlen != outlen) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}

/* One-shot blake2bp over (in, inlen) with optional key.
   Returns 0 on success, -1 on invalid parameters. */
int blake2bp( uint8_t *out, const void *in, const void *key, size_t outlen, size_t inlen, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if ( NULL == key && keylen > 0) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
  #pragma omp parallel shared(S,hash)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    /* full stripes for this worker */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* this worker's share of the final, partial stripe */
    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; // Mark as last node

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}
GB_bitmap_assign_M_row_template.c
//------------------------------------------------------------------------------
// GB_bitmap_assign_M_row_template: traverse M for GB_ROW_ASSIGN
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// M is a 1-by-(C->vdim) hypersparse or sparse matrix, not a vector, for
// GrB_Row_assign (if C is CSC) or GrB_Col_assign (if C is CSR).
// C is bitmap/full.  M is sparse/hyper, and can be jumbled.

// Template body: included with GB_MASK_WORK, cnvals, C/M fields, and the
// slicing arrays defined by the including method.
{

    // M_ek_slicing packs three per-task arrays back to back:
    // [kfirst | klast | pstart] for the M_ntasks parallel tasks.
    const int64_t *restrict kfirst_Mslice = M_ek_slicing ;
    const int64_t *restrict klast_Mslice  = M_ek_slicing + M_ntasks ;
    const int64_t *restrict pstart_Mslice = M_ek_slicing + M_ntasks * 2 ;

    ASSERT (mvlen == 1) ;
    // the single row of C being assigned
    int64_t iC = I [0] ;
    int tid ;

    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        // NOTE(review): task_cnvals is presumably incremented inside
        // GB_MASK_WORK by the including method -- confirm; otherwise the
        // cnvals reduction below always adds zero.
        int64_t task_cnvals = 0 ;

        //----------------------------------------------------------------------
        // traverse over M (0,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of M(0,k) for this task
            //------------------------------------------------------------------

            int64_t jM = GBH (Mh, k) ;
            int64_t pM_start, pM_end ;
            GB_get_pA (&pM_start, &pM_end, tid, k, kfirst,
                klast, pstart_Mslice, Mp, mvlen) ;

            //------------------------------------------------------------------
            // traverse over M(0,jM), the kth vector of M
            //------------------------------------------------------------------

            // for row_assign: M is a single row, iC = I [0]
            // It has either 0 or 1 entry.
            int64_t pM = pM_start ;
            if (pM < pM_end)
            {
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    // C(iC,jC) is in the mask; apply the assignment work
                    int64_t jC = jM ;
                    int64_t pC = iC + jC * cvlen ;
                    GB_MASK_WORK (pC) ;
                }
            }
        }
        cnvals += task_cnvals ;
    }
}
mapc.c
#include <stdlib.h> #include <stdint.h> #include <stdio.h> #include <memory.h> #include <math.h> #include <float.h> #include <assert.h> #include <omp.h> /* ////////////////////////////////////////////////////// Example: create a 256x256 float image with 1 component: struct m_image foo1 = M_IMAGE_IDENTITY(); struct m_image foo2 = M_IMAGE_IDENTITY(); int x, y; m_image_create(&foo1, M_FLOAT, 256, 256, 1); memset(foo1.data, 0, foo1.size * sizeof(float)); // clear to zero y = 128; x = 128; ((float *)foo1.data)[y * foo1.width + x] = 1.0f; // set (x, y) pixel to one m_image_gaussian_blur(&foo2, &foo1, 3, 3); // apply Gaussian blur m_image_destroy(&foo2); m_image_destroy(&foo1); */ struct m_image { void *data; int size; int width; int height; int comp; char type; }; #define M_VOID 0 #define M_BOOL 1 #define M_BYTE 2 #define M_UBYTE 3 #define M_SHORT 4 #define M_USHORT 5 #define M_INT 6 #define M_UINT 7 #define M_HALF 8 #define M_FLOAT 9 #define M_DOUBLE 10 #ifndef M_SAFE_FREE #define M_SAFE_FREE(p) {if (p) {free(p); (p) = NULL;}} #endif /* m_image type util */ int m_type_sizeof(char type); /* fully supported types are: M_UBYTE, M_USHORT, M_HALF, M_FLOAT partially supported types: M_BYTE, M_SHORT, M_INT, M_UINT (no support for conversion) */ void m_image_create(struct m_image *image, char type_, int width, int height, int comp); void m_image_destroy(struct m_image *image); void inline m_flip_buffer(struct m_image *src, struct m_image *dest); int inline vmap_buffer_c(int index, int width, int height, int depth); void inline test_array_inplace(struct m_image *src); void inline test_rgb_inplace(struct m_image *src, struct m_image *red, struct m_image *green, struct m_image *blue); uint32_t m__exponent[64] = { 0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000, 0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000, 0x07800000, 0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0a000000, 0x0a800000, 
0x0b000000, 0x0b800000, 0x0c000000, 0x0c800000, 0x0d000000, 0x0d800000, 0x0e000000, 0x0e800000, 0x0f000000, 0x47800000, 0x80000000, 0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000, 0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000, 0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8a000000, 0x8a800000, 0x8b000000, 0x8b800000, 0x8c000000, 0x8c800000, 0x8d000000, 0x8d800000, 0x8e000000, 0x8e800000, 0x8f000000, 0xc7800000 }; uint32_t m__mantissa[2048] = { 0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34a00000, 0x34c00000, 0x34e00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000, 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35a00000, 0x35a80000, 0x35b00000, 0x35b80000, 0x35c00000, 0x35c80000, 0x35d00000, 0x35d80000, 0x35e00000, 0x35e80000, 0x35f00000, 0x35f80000, 0x36000000, 0x36040000, 0x36080000, 0x360c0000, 0x36100000, 0x36140000, 0x36180000, 0x361c0000, 0x36200000, 0x36240000, 0x36280000, 0x362c0000, 0x36300000, 0x36340000, 0x36380000, 0x363c0000, 0x36400000, 0x36440000, 0x36480000, 0x364c0000, 0x36500000, 0x36540000, 0x36580000, 0x365c0000, 0x36600000, 0x36640000, 0x36680000, 0x366c0000, 0x36700000, 0x36740000, 0x36780000, 0x367c0000, 0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000, 0x368a0000, 0x368c0000, 0x368e0000, 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369a0000, 0x369c0000, 0x369e0000, 0x36a00000, 0x36a20000, 0x36a40000, 0x36a60000, 0x36a80000, 0x36aa0000, 0x36ac0000, 0x36ae0000, 0x36b00000, 0x36b20000, 0x36b40000, 0x36b60000, 0x36b80000, 0x36ba0000, 0x36bc0000, 0x36be0000, 0x36c00000, 0x36c20000, 0x36c40000, 0x36c60000, 0x36c80000, 0x36ca0000, 0x36cc0000, 0x36ce0000, 0x36d00000, 0x36d20000, 0x36d40000, 0x36d60000, 0x36d80000, 0x36da0000, 0x36dc0000, 0x36de0000, 0x36e00000, 0x36e20000, 0x36e40000, 0x36e60000, 0x36e80000, 0x36ea0000, 0x36ec0000, 0x36ee0000, 0x36f00000, 
0x36f20000, 0x36f40000, 0x36f60000, 0x36f80000, 0x36fa0000, 0x36fc0000, 0x36fe0000, 0x37000000, 0x37010000, 0x37020000, 0x37030000, 0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000, 0x370a0000, 0x370b0000, 0x370c0000, 0x370d0000, 0x370e0000, 0x370f0000, 0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000, 0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371a0000, 0x371b0000, 0x371c0000, 0x371d0000, 0x371e0000, 0x371f0000, 0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000, 0x37280000, 0x37290000, 0x372a0000, 0x372b0000, 0x372c0000, 0x372d0000, 0x372e0000, 0x372f0000, 0x37300000, 0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000, 0x373a0000, 0x373b0000, 0x373c0000, 0x373d0000, 0x373e0000, 0x373f0000, 0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374a0000, 0x374b0000, 0x374c0000, 0x374d0000, 0x374e0000, 0x374f0000, 0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000, 0x37580000, 0x37590000, 0x375a0000, 0x375b0000, 0x375c0000, 0x375d0000, 0x375e0000, 0x375f0000, 0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000, 0x376a0000, 0x376b0000, 0x376c0000, 0x376d0000, 0x376e0000, 0x376f0000, 0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377a0000, 0x377b0000, 0x377c0000, 0x377d0000, 0x377e0000, 0x377f0000, 0x37800000, 0x37808000, 0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000, 0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000, 0x37880000, 0x37888000, 0x37890000, 0x37898000, 0x378a0000, 0x378a8000, 0x378b0000, 0x378b8000, 0x378c0000, 0x378c8000, 0x378d0000, 0x378d8000, 0x378e0000, 0x378e8000, 0x378f0000, 
0x378f8000, 0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000, 0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000, 0x37978000, 0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379a0000, 0x379a8000, 0x379b0000, 0x379b8000, 0x379c0000, 0x379c8000, 0x379d0000, 0x379d8000, 0x379e0000, 0x379e8000, 0x379f0000, 0x379f8000, 0x37a00000, 0x37a08000, 0x37a10000, 0x37a18000, 0x37a20000, 0x37a28000, 0x37a30000, 0x37a38000, 0x37a40000, 0x37a48000, 0x37a50000, 0x37a58000, 0x37a60000, 0x37a68000, 0x37a70000, 0x37a78000, 0x37a80000, 0x37a88000, 0x37a90000, 0x37a98000, 0x37aa0000, 0x37aa8000, 0x37ab0000, 0x37ab8000, 0x37ac0000, 0x37ac8000, 0x37ad0000, 0x37ad8000, 0x37ae0000, 0x37ae8000, 0x37af0000, 0x37af8000, 0x37b00000, 0x37b08000, 0x37b10000, 0x37b18000, 0x37b20000, 0x37b28000, 0x37b30000, 0x37b38000, 0x37b40000, 0x37b48000, 0x37b50000, 0x37b58000, 0x37b60000, 0x37b68000, 0x37b70000, 0x37b78000, 0x37b80000, 0x37b88000, 0x37b90000, 0x37b98000, 0x37ba0000, 0x37ba8000, 0x37bb0000, 0x37bb8000, 0x37bc0000, 0x37bc8000, 0x37bd0000, 0x37bd8000, 0x37be0000, 0x37be8000, 0x37bf0000, 0x37bf8000, 0x37c00000, 0x37c08000, 0x37c10000, 0x37c18000, 0x37c20000, 0x37c28000, 0x37c30000, 0x37c38000, 0x37c40000, 0x37c48000, 0x37c50000, 0x37c58000, 0x37c60000, 0x37c68000, 0x37c70000, 0x37c78000, 0x37c80000, 0x37c88000, 0x37c90000, 0x37c98000, 0x37ca0000, 0x37ca8000, 0x37cb0000, 0x37cb8000, 0x37cc0000, 0x37cc8000, 0x37cd0000, 0x37cd8000, 0x37ce0000, 0x37ce8000, 0x37cf0000, 0x37cf8000, 0x37d00000, 0x37d08000, 0x37d10000, 0x37d18000, 0x37d20000, 0x37d28000, 0x37d30000, 0x37d38000, 0x37d40000, 0x37d48000, 0x37d50000, 0x37d58000, 0x37d60000, 0x37d68000, 0x37d70000, 0x37d78000, 0x37d80000, 0x37d88000, 0x37d90000, 0x37d98000, 0x37da0000, 0x37da8000, 0x37db0000, 0x37db8000, 0x37dc0000, 0x37dc8000, 0x37dd0000, 0x37dd8000, 0x37de0000, 0x37de8000, 0x37df0000, 0x37df8000, 0x37e00000, 0x37e08000, 0x37e10000, 0x37e18000, 0x37e20000, 
0x37e28000, 0x37e30000, 0x37e38000, 0x37e40000, 0x37e48000, 0x37e50000, 0x37e58000, 0x37e60000, 0x37e68000, 0x37e70000, 0x37e78000, 0x37e80000, 0x37e88000, 0x37e90000, 0x37e98000, 0x37ea0000, 0x37ea8000, 0x37eb0000, 0x37eb8000, 0x37ec0000, 0x37ec8000, 0x37ed0000, 0x37ed8000, 0x37ee0000, 0x37ee8000, 0x37ef0000, 0x37ef8000, 0x37f00000, 0x37f08000, 0x37f10000, 0x37f18000, 0x37f20000, 0x37f28000, 0x37f30000, 0x37f38000, 0x37f40000, 0x37f48000, 0x37f50000, 0x37f58000, 0x37f60000, 0x37f68000, 0x37f70000, 0x37f78000, 0x37f80000, 0x37f88000, 0x37f90000, 0x37f98000, 0x37fa0000, 0x37fa8000, 0x37fb0000, 0x37fb8000, 0x37fc0000, 0x37fc8000, 0x37fd0000, 0x37fd8000, 0x37fe0000, 0x37fe8000, 0x37ff0000, 0x37ff8000, 0x38000000, 0x38004000, 0x38008000, 0x3800c000, 0x38010000, 0x38014000, 0x38018000, 0x3801c000, 0x38020000, 0x38024000, 0x38028000, 0x3802c000, 0x38030000, 0x38034000, 0x38038000, 0x3803c000, 0x38040000, 0x38044000, 0x38048000, 0x3804c000, 0x38050000, 0x38054000, 0x38058000, 0x3805c000, 0x38060000, 0x38064000, 0x38068000, 0x3806c000, 0x38070000, 0x38074000, 0x38078000, 0x3807c000, 0x38080000, 0x38084000, 0x38088000, 0x3808c000, 0x38090000, 0x38094000, 0x38098000, 0x3809c000, 0x380a0000, 0x380a4000, 0x380a8000, 0x380ac000, 0x380b0000, 0x380b4000, 0x380b8000, 0x380bc000, 0x380c0000, 0x380c4000, 0x380c8000, 0x380cc000, 0x380d0000, 0x380d4000, 0x380d8000, 0x380dc000, 0x380e0000, 0x380e4000, 0x380e8000, 0x380ec000, 0x380f0000, 0x380f4000, 0x380f8000, 0x380fc000, 0x38100000, 0x38104000, 0x38108000, 0x3810c000, 0x38110000, 0x38114000, 0x38118000, 0x3811c000, 0x38120000, 0x38124000, 0x38128000, 0x3812c000, 0x38130000, 0x38134000, 0x38138000, 0x3813c000, 0x38140000, 0x38144000, 0x38148000, 0x3814c000, 0x38150000, 0x38154000, 0x38158000, 0x3815c000, 0x38160000, 0x38164000, 0x38168000, 0x3816c000, 0x38170000, 0x38174000, 0x38178000, 0x3817c000, 0x38180000, 0x38184000, 0x38188000, 0x3818c000, 0x38190000, 0x38194000, 0x38198000, 0x3819c000, 0x381a0000, 0x381a4000, 0x381a8000, 
0x381ac000, 0x381b0000, 0x381b4000, 0x381b8000, 0x381bc000, 0x381c0000, 0x381c4000, 0x381c8000, 0x381cc000, 0x381d0000, 0x381d4000, 0x381d8000, 0x381dc000, 0x381e0000, 0x381e4000, 0x381e8000, 0x381ec000, 0x381f0000, 0x381f4000, 0x381f8000, 0x381fc000, 0x38200000, 0x38204000, 0x38208000, 0x3820c000, 0x38210000, 0x38214000, 0x38218000, 0x3821c000, 0x38220000, 0x38224000, 0x38228000, 0x3822c000, 0x38230000, 0x38234000, 0x38238000, 0x3823c000, 0x38240000, 0x38244000, 0x38248000, 0x3824c000, 0x38250000, 0x38254000, 0x38258000, 0x3825c000, 0x38260000, 0x38264000, 0x38268000, 0x3826c000, 0x38270000, 0x38274000, 0x38278000, 0x3827c000, 0x38280000, 0x38284000, 0x38288000, 0x3828c000, 0x38290000, 0x38294000, 0x38298000, 0x3829c000, 0x382a0000, 0x382a4000, 0x382a8000, 0x382ac000, 0x382b0000, 0x382b4000, 0x382b8000, 0x382bc000, 0x382c0000, 0x382c4000, 0x382c8000, 0x382cc000, 0x382d0000, 0x382d4000, 0x382d8000, 0x382dc000, 0x382e0000, 0x382e4000, 0x382e8000, 0x382ec000, 0x382f0000, 0x382f4000, 0x382f8000, 0x382fc000, 0x38300000, 0x38304000, 0x38308000, 0x3830c000, 0x38310000, 0x38314000, 0x38318000, 0x3831c000, 0x38320000, 0x38324000, 0x38328000, 0x3832c000, 0x38330000, 0x38334000, 0x38338000, 0x3833c000, 0x38340000, 0x38344000, 0x38348000, 0x3834c000, 0x38350000, 0x38354000, 0x38358000, 0x3835c000, 0x38360000, 0x38364000, 0x38368000, 0x3836c000, 0x38370000, 0x38374000, 0x38378000, 0x3837c000, 0x38380000, 0x38384000, 0x38388000, 0x3838c000, 0x38390000, 0x38394000, 0x38398000, 0x3839c000, 0x383a0000, 0x383a4000, 0x383a8000, 0x383ac000, 0x383b0000, 0x383b4000, 0x383b8000, 0x383bc000, 0x383c0000, 0x383c4000, 0x383c8000, 0x383cc000, 0x383d0000, 0x383d4000, 0x383d8000, 0x383dc000, 0x383e0000, 0x383e4000, 0x383e8000, 0x383ec000, 0x383f0000, 0x383f4000, 0x383f8000, 0x383fc000, 0x38400000, 0x38404000, 0x38408000, 0x3840c000, 0x38410000, 0x38414000, 0x38418000, 0x3841c000, 0x38420000, 0x38424000, 0x38428000, 0x3842c000, 0x38430000, 0x38434000, 0x38438000, 0x3843c000, 0x38440000, 
0x38444000, 0x38448000, 0x3844c000, 0x38450000, 0x38454000, 0x38458000, 0x3845c000, 0x38460000, 0x38464000, 0x38468000, 0x3846c000, 0x38470000, 0x38474000, 0x38478000, 0x3847c000, 0x38480000, 0x38484000, 0x38488000, 0x3848c000, 0x38490000, 0x38494000, 0x38498000, 0x3849c000, 0x384a0000, 0x384a4000, 0x384a8000, 0x384ac000, 0x384b0000, 0x384b4000, 0x384b8000, 0x384bc000, 0x384c0000, 0x384c4000, 0x384c8000, 0x384cc000, 0x384d0000, 0x384d4000, 0x384d8000, 0x384dc000, 0x384e0000, 0x384e4000, 0x384e8000, 0x384ec000, 0x384f0000, 0x384f4000, 0x384f8000, 0x384fc000, 0x38500000, 0x38504000, 0x38508000, 0x3850c000, 0x38510000, 0x38514000, 0x38518000, 0x3851c000, 0x38520000, 0x38524000, 0x38528000, 0x3852c000, 0x38530000, 0x38534000, 0x38538000, 0x3853c000, 0x38540000, 0x38544000, 0x38548000, 0x3854c000, 0x38550000, 0x38554000, 0x38558000, 0x3855c000, 0x38560000, 0x38564000, 0x38568000, 0x3856c000, 0x38570000, 0x38574000, 0x38578000, 0x3857c000, 0x38580000, 0x38584000, 0x38588000, 0x3858c000, 0x38590000, 0x38594000, 0x38598000, 0x3859c000, 0x385a0000, 0x385a4000, 0x385a8000, 0x385ac000, 0x385b0000, 0x385b4000, 0x385b8000, 0x385bc000, 0x385c0000, 0x385c4000, 0x385c8000, 0x385cc000, 0x385d0000, 0x385d4000, 0x385d8000, 0x385dc000, 0x385e0000, 0x385e4000, 0x385e8000, 0x385ec000, 0x385f0000, 0x385f4000, 0x385f8000, 0x385fc000, 0x38600000, 0x38604000, 0x38608000, 0x3860c000, 0x38610000, 0x38614000, 0x38618000, 0x3861c000, 0x38620000, 0x38624000, 0x38628000, 0x3862c000, 0x38630000, 0x38634000, 0x38638000, 0x3863c000, 0x38640000, 0x38644000, 0x38648000, 0x3864c000, 0x38650000, 0x38654000, 0x38658000, 0x3865c000, 0x38660000, 0x38664000, 0x38668000, 0x3866c000, 0x38670000, 0x38674000, 0x38678000, 0x3867c000, 0x38680000, 0x38684000, 0x38688000, 0x3868c000, 0x38690000, 0x38694000, 0x38698000, 0x3869c000, 0x386a0000, 0x386a4000, 0x386a8000, 0x386ac000, 0x386b0000, 0x386b4000, 0x386b8000, 0x386bc000, 0x386c0000, 0x386c4000, 0x386c8000, 0x386cc000, 0x386d0000, 0x386d4000, 0x386d8000, 
0x386dc000, 0x386e0000, 0x386e4000, 0x386e8000, 0x386ec000, 0x386f0000, 0x386f4000, 0x386f8000, 0x386fc000, 0x38700000, 0x38704000, 0x38708000, 0x3870c000, 0x38710000, 0x38714000, 0x38718000, 0x3871c000, 0x38720000, 0x38724000, 0x38728000, 0x3872c000, 0x38730000, 0x38734000, 0x38738000, 0x3873c000, 0x38740000, 0x38744000, 0x38748000, 0x3874c000, 0x38750000, 0x38754000, 0x38758000, 0x3875c000, 0x38760000, 0x38764000, 0x38768000, 0x3876c000, 0x38770000, 0x38774000, 0x38778000, 0x3877c000, 0x38780000, 0x38784000, 0x38788000, 0x3878c000, 0x38790000, 0x38794000, 0x38798000, 0x3879c000, 0x387a0000, 0x387a4000, 0x387a8000, 0x387ac000, 0x387b0000, 0x387b4000, 0x387b8000, 0x387bc000, 0x387c0000, 0x387c4000, 0x387c8000, 0x387cc000, 0x387d0000, 0x387d4000, 0x387d8000, 0x387dc000, 0x387e0000, 0x387e4000, 0x387e8000, 0x387ec000, 0x387f0000, 0x387f4000, 0x387f8000, 0x387fc000, 0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800a000, 0x3800c000, 0x3800e000, 0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801a000, 0x3801c000, 0x3801e000, 0x38020000, 0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802a000, 0x3802c000, 0x3802e000, 0x38030000, 0x38032000, 0x38034000, 0x38036000, 0x38038000, 0x3803a000, 0x3803c000, 0x3803e000, 0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804a000, 0x3804c000, 0x3804e000, 0x38050000, 0x38052000, 0x38054000, 0x38056000, 0x38058000, 0x3805a000, 0x3805c000, 0x3805e000, 0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000, 0x3806a000, 0x3806c000, 0x3806e000, 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807a000, 0x3807c000, 0x3807e000, 0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808a000, 0x3808c000, 0x3808e000, 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809a000, 0x3809c000, 0x3809e000, 0x380a0000, 0x380a2000, 0x380a4000, 0x380a6000, 0x380a8000, 0x380aa000, 0x380ac000, 0x380ae000, 0x380b0000, 0x380b2000, 0x380b4000, 0x380b6000, 0x380b8000, 
0x380ba000, 0x380bc000, 0x380be000, 0x380c0000, 0x380c2000, 0x380c4000, 0x380c6000, 0x380c8000, 0x380ca000, 0x380cc000, 0x380ce000, 0x380d0000, 0x380d2000, 0x380d4000, 0x380d6000, 0x380d8000, 0x380da000, 0x380dc000, 0x380de000, 0x380e0000, 0x380e2000, 0x380e4000, 0x380e6000, 0x380e8000, 0x380ea000, 0x380ec000, 0x380ee000, 0x380f0000, 0x380f2000, 0x380f4000, 0x380f6000, 0x380f8000, 0x380fa000, 0x380fc000, 0x380fe000, 0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810a000, 0x3810c000, 0x3810e000, 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811a000, 0x3811c000, 0x3811e000, 0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812a000, 0x3812c000, 0x3812e000, 0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813a000, 0x3813c000, 0x3813e000, 0x38140000, 0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814a000, 0x3814c000, 0x3814e000, 0x38150000, 0x38152000, 0x38154000, 0x38156000, 0x38158000, 0x3815a000, 0x3815c000, 0x3815e000, 0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816a000, 0x3816c000, 0x3816e000, 0x38170000, 0x38172000, 0x38174000, 0x38176000, 0x38178000, 0x3817a000, 0x3817c000, 0x3817e000, 0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000, 0x3818a000, 0x3818c000, 0x3818e000, 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819a000, 0x3819c000, 0x3819e000, 0x381a0000, 0x381a2000, 0x381a4000, 0x381a6000, 0x381a8000, 0x381aa000, 0x381ac000, 0x381ae000, 0x381b0000, 0x381b2000, 0x381b4000, 0x381b6000, 0x381b8000, 0x381ba000, 0x381bc000, 0x381be000, 0x381c0000, 0x381c2000, 0x381c4000, 0x381c6000, 0x381c8000, 0x381ca000, 0x381cc000, 0x381ce000, 0x381d0000, 0x381d2000, 0x381d4000, 0x381d6000, 0x381d8000, 0x381da000, 0x381dc000, 0x381de000, 0x381e0000, 0x381e2000, 0x381e4000, 0x381e6000, 0x381e8000, 0x381ea000, 0x381ec000, 0x381ee000, 0x381f0000, 0x381f2000, 0x381f4000, 0x381f6000, 0x381f8000, 0x381fa000, 0x381fc000, 0x381fe000, 0x38200000, 0x38202000, 0x38204000, 
0x38206000, 0x38208000, 0x3820a000, 0x3820c000, 0x3820e000, 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000, 0x3821a000, 0x3821c000, 0x3821e000, 0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822a000, 0x3822c000, 0x3822e000, 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823a000, 0x3823c000, 0x3823e000, 0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824a000, 0x3824c000, 0x3824e000, 0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825a000, 0x3825c000, 0x3825e000, 0x38260000, 0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826a000, 0x3826c000, 0x3826e000, 0x38270000, 0x38272000, 0x38274000, 0x38276000, 0x38278000, 0x3827a000, 0x3827c000, 0x3827e000, 0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828a000, 0x3828c000, 0x3828e000, 0x38290000, 0x38292000, 0x38294000, 0x38296000, 0x38298000, 0x3829a000, 0x3829c000, 0x3829e000, 0x382a0000, 0x382a2000, 0x382a4000, 0x382a6000, 0x382a8000, 0x382aa000, 0x382ac000, 0x382ae000, 0x382b0000, 0x382b2000, 0x382b4000, 0x382b6000, 0x382b8000, 0x382ba000, 0x382bc000, 0x382be000, 0x382c0000, 0x382c2000, 0x382c4000, 0x382c6000, 0x382c8000, 0x382ca000, 0x382cc000, 0x382ce000, 0x382d0000, 0x382d2000, 0x382d4000, 0x382d6000, 0x382d8000, 0x382da000, 0x382dc000, 0x382de000, 0x382e0000, 0x382e2000, 0x382e4000, 0x382e6000, 0x382e8000, 0x382ea000, 0x382ec000, 0x382ee000, 0x382f0000, 0x382f2000, 0x382f4000, 0x382f6000, 0x382f8000, 0x382fa000, 0x382fc000, 0x382fe000, 0x38300000, 0x38302000, 0x38304000, 0x38306000, 0x38308000, 0x3830a000, 0x3830c000, 0x3830e000, 0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831a000, 0x3831c000, 0x3831e000, 0x38320000, 0x38322000, 0x38324000, 0x38326000, 0x38328000, 0x3832a000, 0x3832c000, 0x3832e000, 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000, 0x3833a000, 0x3833c000, 0x3833e000, 0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834a000, 0x3834c000, 0x3834e000, 0x38350000, 
0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835a000, 0x3835c000, 0x3835e000, 0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836a000, 0x3836c000, 0x3836e000, 0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837a000, 0x3837c000, 0x3837e000, 0x38380000, 0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838a000, 0x3838c000, 0x3838e000, 0x38390000, 0x38392000, 0x38394000, 0x38396000, 0x38398000, 0x3839a000, 0x3839c000, 0x3839e000, 0x383a0000, 0x383a2000, 0x383a4000, 0x383a6000, 0x383a8000, 0x383aa000, 0x383ac000, 0x383ae000, 0x383b0000, 0x383b2000, 0x383b4000, 0x383b6000, 0x383b8000, 0x383ba000, 0x383bc000, 0x383be000, 0x383c0000, 0x383c2000, 0x383c4000, 0x383c6000, 0x383c8000, 0x383ca000, 0x383cc000, 0x383ce000, 0x383d0000, 0x383d2000, 0x383d4000, 0x383d6000, 0x383d8000, 0x383da000, 0x383dc000, 0x383de000, 0x383e0000, 0x383e2000, 0x383e4000, 0x383e6000, 0x383e8000, 0x383ea000, 0x383ec000, 0x383ee000, 0x383f0000, 0x383f2000, 0x383f4000, 0x383f6000, 0x383f8000, 0x383fa000, 0x383fc000, 0x383fe000, 0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840a000, 0x3840c000, 0x3840e000, 0x38410000, 0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841a000, 0x3841c000, 0x3841e000, 0x38420000, 0x38422000, 0x38424000, 0x38426000, 0x38428000, 0x3842a000, 0x3842c000, 0x3842e000, 0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843a000, 0x3843c000, 0x3843e000, 0x38440000, 0x38442000, 0x38444000, 0x38446000, 0x38448000, 0x3844a000, 0x3844c000, 0x3844e000, 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000, 0x3845a000, 0x3845c000, 0x3845e000, 0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846a000, 0x3846c000, 0x3846e000, 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847a000, 0x3847c000, 0x3847e000, 0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848a000, 0x3848c000, 0x3848e000, 0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849a000, 0x3849c000, 
0x3849e000, 0x384a0000, 0x384a2000, 0x384a4000, 0x384a6000, 0x384a8000, 0x384aa000, 0x384ac000, 0x384ae000, 0x384b0000, 0x384b2000, 0x384b4000, 0x384b6000, 0x384b8000, 0x384ba000, 0x384bc000, 0x384be000, 0x384c0000, 0x384c2000, 0x384c4000, 0x384c6000, 0x384c8000, 0x384ca000, 0x384cc000, 0x384ce000, 0x384d0000, 0x384d2000, 0x384d4000, 0x384d6000, 0x384d8000, 0x384da000, 0x384dc000, 0x384de000, 0x384e0000, 0x384e2000, 0x384e4000, 0x384e6000, 0x384e8000, 0x384ea000, 0x384ec000, 0x384ee000, 0x384f0000, 0x384f2000, 0x384f4000, 0x384f6000, 0x384f8000, 0x384fa000, 0x384fc000, 0x384fe000, 0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850a000, 0x3850c000, 0x3850e000, 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851a000, 0x3851c000, 0x3851e000, 0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852a000, 0x3852c000, 0x3852e000, 0x38530000, 0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853a000, 0x3853c000, 0x3853e000, 0x38540000, 0x38542000, 0x38544000, 0x38546000, 0x38548000, 0x3854a000, 0x3854c000, 0x3854e000, 0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855a000, 0x3855c000, 0x3855e000, 0x38560000, 0x38562000, 0x38564000, 0x38566000, 0x38568000, 0x3856a000, 0x3856c000, 0x3856e000, 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000, 0x3857a000, 0x3857c000, 0x3857e000, 0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858a000, 0x3858c000, 0x3858e000, 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859a000, 0x3859c000, 0x3859e000, 0x385a0000, 0x385a2000, 0x385a4000, 0x385a6000, 0x385a8000, 0x385aa000, 0x385ac000, 0x385ae000, 0x385b0000, 0x385b2000, 0x385b4000, 0x385b6000, 0x385b8000, 0x385ba000, 0x385bc000, 0x385be000, 0x385c0000, 0x385c2000, 0x385c4000, 0x385c6000, 0x385c8000, 0x385ca000, 0x385cc000, 0x385ce000, 0x385d0000, 0x385d2000, 0x385d4000, 0x385d6000, 0x385d8000, 0x385da000, 0x385dc000, 0x385de000, 0x385e0000, 0x385e2000, 0x385e4000, 0x385e6000, 0x385e8000, 
0x385ea000, 0x385ec000, 0x385ee000, 0x385f0000, 0x385f2000, 0x385f4000, 0x385f6000, 0x385f8000, 0x385fa000, 0x385fc000, 0x385fe000, 0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000, 0x3860a000, 0x3860c000, 0x3860e000, 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861a000, 0x3861c000, 0x3861e000, 0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862a000, 0x3862c000, 0x3862e000, 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863a000, 0x3863c000, 0x3863e000, 0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864a000, 0x3864c000, 0x3864e000, 0x38650000, 0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865a000, 0x3865c000, 0x3865e000, 0x38660000, 0x38662000, 0x38664000, 0x38666000, 0x38668000, 0x3866a000, 0x3866c000, 0x3866e000, 0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867a000, 0x3867c000, 0x3867e000, 0x38680000, 0x38682000, 0x38684000, 0x38686000, 0x38688000, 0x3868a000, 0x3868c000, 0x3868e000, 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000, 0x3869a000, 0x3869c000, 0x3869e000, 0x386a0000, 0x386a2000, 0x386a4000, 0x386a6000, 0x386a8000, 0x386aa000, 0x386ac000, 0x386ae000, 0x386b0000, 0x386b2000, 0x386b4000, 0x386b6000, 0x386b8000, 0x386ba000, 0x386bc000, 0x386be000, 0x386c0000, 0x386c2000, 0x386c4000, 0x386c6000, 0x386c8000, 0x386ca000, 0x386cc000, 0x386ce000, 0x386d0000, 0x386d2000, 0x386d4000, 0x386d6000, 0x386d8000, 0x386da000, 0x386dc000, 0x386de000, 0x386e0000, 0x386e2000, 0x386e4000, 0x386e6000, 0x386e8000, 0x386ea000, 0x386ec000, 0x386ee000, 0x386f0000, 0x386f2000, 0x386f4000, 0x386f6000, 0x386f8000, 0x386fa000, 0x386fc000, 0x386fe000, 0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870a000, 0x3870c000, 0x3870e000, 0x38710000, 0x38712000, 0x38714000, 0x38716000, 0x38718000, 0x3871a000, 0x3871c000, 0x3871e000, 0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000, 0x3872a000, 0x3872c000, 0x3872e000, 0x38730000, 0x38732000, 0x38734000, 
0x38736000, 0x38738000, 0x3873a000, 0x3873c000, 0x3873e000, 0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874a000, 0x3874c000, 0x3874e000, 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875a000, 0x3875c000, 0x3875e000, 0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876a000, 0x3876c000, 0x3876e000, 0x38770000, 0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877a000, 0x3877c000, 0x3877e000, 0x38780000, 0x38782000, 0x38784000, 0x38786000, 0x38788000, 0x3878a000, 0x3878c000, 0x3878e000, 0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879a000, 0x3879c000, 0x3879e000, 0x387a0000, 0x387a2000, 0x387a4000, 0x387a6000, 0x387a8000, 0x387aa000, 0x387ac000, 0x387ae000, 0x387b0000, 0x387b2000, 0x387b4000, 0x387b6000, 0x387b8000, 0x387ba000, 0x387bc000, 0x387be000, 0x387c0000, 0x387c2000, 0x387c4000, 0x387c6000, 0x387c8000, 0x387ca000, 0x387cc000, 0x387ce000, 0x387d0000, 0x387d2000, 0x387d4000, 0x387d6000, 0x387d8000, 0x387da000, 0x387dc000, 0x387de000, 0x387e0000, 0x387e2000, 0x387e4000, 0x387e6000, 0x387e8000, 0x387ea000, 0x387ec000, 0x387ee000, 0x387f0000, 0x387f2000, 0x387f4000, 0x387f6000, 0x387f8000, 0x387fa000, 0x387fc000, 0x387fe000 }; int m_type_sizeof(char type) { switch (type) { case M_BYTE: case M_UBYTE: return sizeof(uint8_t); break; case M_SHORT: case M_USHORT: case M_HALF: return sizeof(uint16_t); break; case M_BOOL: case M_INT: case M_UINT: return sizeof(uint32_t); break; case M_FLOAT: return sizeof(float); break; case M_DOUBLE: return sizeof(double); break; default: assert(0); return 0; } } void m_image_create(struct m_image *image, char type_, int width, int height, int comp) { // Init structure image image->data = 0; image->size = 0; image->width = 0; image->height = 0; image->type = 0; image->comp = 0; int size = width * height * comp; assert(size > 0); M_SAFE_FREE(image->data); // Allocate memory image->data = malloc(size * m_type_sizeof(type_)); if( !image->data ) printf("BAD 
ALLOC:m_image_create\n"); image->type = type_; image->width = width; image->height = height; image->comp = comp; image->size = size; // Reset all the pixels memset(image->data, 0, image->size * sizeof(unsigned char)); } void m_image_destroy(struct m_image *image) { M_SAFE_FREE(image->data); memset(image, 0, sizeof(struct m_image)); } inline int vmap_buffer_c(int index, int width, int height, int depth) { /* Vertically flipped a single buffer value. :param index: integer; index value :param width: integer; image width :param height: integer; image height :param depth: integer; image depth (3)RGB or (4)RGBA :return: integer value pointing to the pixel in the buffer (traversed vertically). */ int x, y, z, ix; ix = (int)(index / 4); y = (int)(ix / height); x = ix % height; z = index % depth; return (x * width * depth) + (depth * y) + z; } void inline m_flip_buffer(struct m_image *src, struct m_image *dst) { unsigned char *src_p, *dst_p; src_p = (unsigned char *)src->data; dst_p = (unsigned char *)dst->data; int i, index, avg; int d = src->comp; for (i=0; i<src->size; i+=d){ index = vmap_buffer_c(i, src->width, src->height, d); avg = (unsigned char)((src_p[i] + src_p[i + 1] + src_p[i + 2]) / 3.0); dst_p[i ] = avg; dst_p[i + 1] = avg; dst_p[i + 2] = avg; } } void inline test_array_inplace(struct m_image *src) { unsigned char *src_p; src_p = (unsigned char *)src->data; int i; int d = src->comp; for (i=0; i<src->size; i+=1){ src_p[i] = src_p[i]; } } void inline test_rgb_inplace(struct m_image *src, struct m_image *red, struct m_image *green, struct m_image *blue) { unsigned char *src_p; unsigned char *red_p; unsigned char *green_p; unsigned char *blue_p; src_p = (unsigned char *)src->data; red_p = (unsigned char *)red->data; green_p = (unsigned char *)green->data; blue_p = (unsigned char *)blue->data; int i; int d = src->comp; int n = src->size; #pragma omp for schedule(static) nowait for (i=0; i<n; i+=3){ red_p[i] = src_p[i]; red_p[i+1] = 1; red_p[i+2] = 1; green_p[i+1] = 
src_p[i+1]; green_p[i] = 1; green_p[i+2] = 1; blue_p[i] = 1; blue_p[i+1] = 1; blue_p[i+2] = src_p[i+2]; } } int main(){ return 0; }
/* ---- setsketch.h : start of concatenated C++ header ---- */
#ifndef EHLL_H__ #define EHLL_H__ #include <stdexcept> #include <cassert> #include "aesctr/wy.h" #include <queue> #include "sketch/div.h" #include <unordered_map> #include <memory> #include "fy.h" #include "sketch/count_eq.h" #include "sketch/macros.h" #include "sketch/hash.h" #include "xxHash/xxh3.h" #include "flat_hash_map/flat_hash_map.hpp" namespace sketch { namespace setsketch { namespace detail { struct Deleter { template<typename T> void operator()(const T *x) const {std::free(const_cast<T *>(x));} }; template <class F, class T> std::tuple<T, T, uint64_t> brent_find_minima(const F &f, T min, T max, int bits=std::numeric_limits<T>::digits, uint64_t max_iter=std::numeric_limits<uint64_t>::max()) noexcept { T x, w, v, u, delta, delta2, fu, fv, fw, fx, mid, fract1, fract2; const T tolerance = static_cast<T>(std::ldexp(1.0, 1-bits)); static constexpr T golden = 0.3819660; // golden ratio, don't need too much precision here! x = w = v = max; fw = fv = fx = f(x); delta2 = delta = 0; uint64_t count = max_iter; do { mid = (min + max) / 2; fract1 = tolerance * std::abs(x) + tolerance / 4; fract2 = 2 * fract1; if(std::abs(x - mid) <= (fract2 - (max - min) / 2)) break; if(std::abs(delta2) > fract1) { T r = (x - w) * (fx - fv); T q = (x - v) * (fx - fw); T p = (x - v) * q - (x - w) * r; q = 2 * (q - r); if(q > 0) p = -p; else q = -q; T td = delta2; delta2 = delta; if((std::abs(p) >= std::abs(q * td / 2)) || (p <= q * (min - x)) || (p >= q * (max - x))) { delta2 = (x >= mid) ? min - x : max - x; delta = golden * delta2; } else { delta = p / q; u = x + delta; if(((u - min) < fract2) || ((max- u) < fract2)) delta = (mid - x) < 0 ? (T)-std::abs(fract1) : (T)std::abs(fract1); } } else { delta2 = (x >= mid) ? min - x : max - x; delta = golden * delta2; } u = (std::abs(delta) >= fract1) ? T(x + delta) : (delta > 0 ? 
T(x + std::abs(fract1)) : T(x - std::abs(fract1))); fu = f(u); if(fu <= fx) { if(u >= x) min = x; else max = x; v = w;w = x; x = u; fv = fw; fw = fx; fx = fu; } else { // Oh dear, point u is worse than what we have already, // even so it *must* be better than one of our endpoints: if(u < x) min = u; else max = u; if((fu <= fw) || (w == x)) v = w, w = u, fv = fw, fw = fu; else if((fu <= fv) || (v == x) || (v == w)) v = u, fv = fu; } } while(--count); return std::make_tuple(x, fx, max_iter - count); } } template<typename FT> static inline FT jmle_simple(const uint64_t lhgt, const uint64_t rhgt, const size_t m, const FT lhest, const FT rhest, FT base) { if(!lhest && !rhest) return FT(0.); const uint64_t neq = m - (lhgt + rhgt); const FT sumest = lhest + rhest; const long double bi = 1.L / base; const long double lbase = std::log(static_cast<long double>(base)), lbi = 1. / lbase; //const long double lbdb = base - 1. ? std::log1p(base - 1.L) / (base - 1.L): 1.L; const FT z = (1.L - bi) / (sumest); auto func = [neq,lhgt,rhgt,lbi,z,rhest,lhest](auto jaccard) { FT lhs = neq || lhgt ? FT(lbi * std::log1p((rhest * jaccard - lhest) * z)): FT(0); FT rhs = neq || rhgt ? 
FT(lbi * std::log1p((lhest * jaccard - rhest) * z)): FT(0); FT ret = 0; if(neq) ret += neq * std::log1p(lhs + rhs); if(lhgt) ret += lhgt * std::log(-lhs); if(rhgt) ret += rhgt * std::log(-rhs); if(std::isnan(ret)) return std::numeric_limits<FT>::max(); return -ret; }; return std::get<0>(detail::brent_find_minima(func, FT(0), std::min(lhest, rhest) / std::max(lhest, rhest), 24)); } #if __cplusplus >= 201703L static constexpr double INVMUL64 = 0x1p-64; #else static constexpr double INVMUL64 = 5.42101086242752217e-20; #endif // Implementations of set sketch template<typename FT> class mvt_t { FT mv_; FT *data_ = nullptr; size_t m_; public: mvt_t(size_t m, FT mv = std::numeric_limits<FT>::max()): mv_(m), m_(m) {} FT mv() const {return mv_;} FT *data() {return data_;} const FT *data() const {return data_;} // Check size and max size_t getm() const {return m_;} size_t nelem() const {return 2 * m_ - 1;} FT operator[](size_t i) const {return data_[i];} void assign(FT *vals, size_t nvals, FT mv) { mv_ = mv; assign(vals, nvals); } void assign(FT *vals, size_t nvals) { data_ = vals; m_ = nvals; std::fill(data_, data_ + nelem(), mv_); } FT max() const { return data_[nelem() - 1]; } FT klow() const { return max(); } bool update(size_t index, FT x) { const auto sz = nelem(); if(x < data_[index]) { for(;;) { data_[index] = x; if((index = m_ + (index >> 1)) >= sz) break; const size_t lhi = (index - m_) << 1, rhi = lhi + 1; x = std::max(data_[lhi], data_[rhi]); if(x >= data_[index]) break; } assert(max() == *std::max_element(data_, data_ + m_)); return true; } return false; } }; template<typename ResT> struct minvt_t { static constexpr ResT minv_ = 0; ResT *data_ = nullptr; size_t m_; long double b_ = -1., explim_ = -1.; minvt_t(size_t m): m_(m) {} double explim() const {return explim_;} ResT *data() {return data_;} const ResT *data() const {return data_;} // Check size and max size_t getm() const {return m_;} ResT operator[](size_t i) const {return data_[i];} void assign(ResT 
*vals, size_t nvals, double b) { data_ = vals; m_ = nvals; b_ = b; std::fill(data_, data_ + (m_ << 1) - 1, minv_); explim_ = std::pow(b_, -min()); } typename std::ptrdiff_t min() const { return data_[(m_ << 1) - 2]; } typename std::ptrdiff_t klow() const { return min(); } typename std::ptrdiff_t max() const {return *std::max_element(data_, &data_[(m_ << 1) - 1]);} bool update(size_t index, ResT x) { const auto sz = (m_ << 1) - 1; if(x > data_[index]) { for(;;) { data_[index] = x; if((index = m_ + (index >> 1)) >= sz) break; const size_t lhi = (index - m_) << 1, rhi = lhi + 1; x = std::min(data_[lhi], data_[rhi]); if(x <= data_[index]) break; } explim_ = std::pow(b_, -min()); assert(min() == *std::min_element(data_, data_ + m_)); return true; } return false; } }; template<typename ResT> struct LowKHelper { ResT *vals_; uint64_t natval_, nvals_; double b_ = -1.; double explim_; int klow_ = 0; LowKHelper(size_t m): nvals_(m) {} void assign(ResT *vals, size_t nvals, double b) { vals_ = vals; nvals_ = nvals; b_ = b; reset(); } int klow() const {return klow_;} auto max() const {return *std::max_element(vals_, vals_ + nvals_);} double explim() const {return explim_;} void reset() { klow_ = *std::min_element(vals_, vals_ + nvals_); size_t i; for(i = natval_ = 0; i < nvals_; ++i) natval_ += (vals_[i] == klow_); explim_ = std::pow(b_, -klow_); } bool update(size_t idx, ResT k) { if(k > vals_[idx]) { auto oldv = vals_[idx]; vals_[idx] = k; remove(oldv); return true; } return false; } void remove(int kval) { if(kval == klow_) { if(--natval_ == 0) reset(); } } }; #if __AVX2__ INLINE float broadcast_reduce_sum(__m256 x) { const __m256 permHalves = _mm256_permute2f128_ps(x, x, 1); const __m256 m0 = _mm256_add_ps(permHalves, x); const __m256 perm0 = _mm256_permute_ps(m0, 0b01001110); const __m256 m1 = _mm256_add_ps(m0, perm0); const __m256 perm1 = _mm256_permute_ps(m1, 0b10110001); const __m256 m2 = _mm256_add_ps(perm1, m1); return m2[0]; } INLINE double 
broadcast_reduce_sum(__m256d x) { __m256d m1 = _mm256_add_pd(x, _mm256_permute2f128_pd(x, x, 1)); return _mm256_add_pd(m1, _mm256_permute_pd(m1, 5))[0]; } #endif static inline long double g_b(long double b, long double arg) { return (1.L - std::pow(b, -arg)) / (1.L - 1.L / b); } template<typename ResT, typename FT=double> class SetSketch; // Forward template<typename FT=double, bool FLOGFILTER=true> class CSetSketch { static_assert(std::is_floating_point<FT>::value, "Must float"); // SetSketch 1 size_t m_; // Number of registers std::unique_ptr<FT[], detail::Deleter> data_; fy::LazyShuffler ls_; mvt_t<FT> mvt_; std::vector<uint64_t> ids_; std::vector<uint32_t> idcounts_; uint64_t total_updates_ = 0; mutable double mycard_ = -1.; static FT *allocate(size_t n) { n = (n << 1) - 1; FT *ret = nullptr; static constexpr size_t ALN = #if __AVX512F__ 64; #elif __AVX2__ 32; #else 16; #endif if(posix_memalign((void **)&ret, ALN, n * sizeof(FT))) throw std::bad_alloc(); return ret; } FT getbeta(size_t idx) const { return FT(1.) / static_cast<FT>(m_ - idx); } public: const FT *data() const {return data_.get();} FT *data() {return data_.get();} CSetSketch(size_t m, bool track_ids=false, bool track_counts=false, FT maxv=std::numeric_limits<FT>::max()): m_(m), ls_(m_), mvt_(m_) { data_.reset(allocate(m_)); mvt_.assign(data_.get(), m_, maxv); if(track_ids || track_counts) ids_.resize(m_); if(track_counts) idcounts_.resize(m_); //generate_betas(); } CSetSketch(const CSetSketch &o): m_(o.m_), data_(allocate(o.m_)), ls_(m_), mvt_(m_, o.mvt_.mv()), ids_(o.ids_), idcounts_(o.idcounts_) { mvt_.assign(data_.get(), m_, o.mvt_.mv()); std::copy(o.data_.get(), &o.data_[2 * m_ - 1], data_.get()); //generate_betas(); } template<typename ResT=uint16_t> SetSketch<ResT, FT> to_setsketch(double b, double a, int64_t q=std::numeric_limits<ResT>::max() - 1) const { SetSketch<ResT, FT> ret(m_, b, a, q, ids_.size()); const double logbinv = 1. 
/ std::log1p(b - 1.); for(size_t i = 0; i < m_; ++i) { ret.lowkh().update(i, std::max(int64_t(0), std::min(int64_t(q) + 1, static_cast<int64_t>((1. - std::log(data_[i] / a) * logbinv))))); } return ret; } CSetSketch &operator=(const CSetSketch &o) { if(size() != o.size()) { if(m_ < o.m_) data_.reset(allocate(o.m_)); m_ = o.m_; ls_.resize(m_); //generate_betas(); } mvt_.assign(data_.get(), m_, o.mvt_.mv()); std::copy(o.data(), o.data() + (2 * m_ - 1), data()); if(o.ids_.size()) { ids_ = o.ids_; if(o.idcounts_.size()) idcounts_ = o.idcounts_; } total_updates_ = o.total_updates_; return *this; } CSetSketch(std::FILE *fp): ls_(1), mvt_(1) {read(fp);} CSetSketch(gzFile fp): ls_(1), mvt_(1) {read(fp);} CSetSketch(const std::string &s): ls_(1), mvt_(1) { read(s); } CSetSketch<FT> clone_like() const { return CSetSketch(m_, !ids().empty(), !idcounts().empty()); } FT min() const {return *std::min_element(data(), data() + m_);} FT max() const {return mvt_.max();} size_t size() const {return m_;} FT &operator[](size_t i) {return data_[i];} const FT &operator[](size_t i) const {return data_[i];} void addh(uint64_t id) {update(id);} void add(uint64_t id) {update(id);} size_t total_updates() const {return total_updates_;} long double flog(long double x) const { __uint128_t yi; std::memcpy(&yi, &x, sizeof(x)); return yi * 3.7575583950764744255e-20L - 11356.176832703863597L; } double flog(double x) const { uint64_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 1.539095918623324e-16 - 709.0895657128241; } float flog(float x) const { uint32_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 8.2629582881927490e-8f - 88.02969186f; } void update(const uint64_t id) { mycard_ = -1.; ++total_updates_; uint64_t hid = id; uint64_t rv = wy::wyhash64_stateless(&hid); FT ev; FT mv = max(); CONST_IF(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; const FT bv = -1. 
/ m_; lrv |= wy::wyhash64_stateless(&rv); FT tv = static_cast<long double>((lrv >> 32) * 1.2621774483536188887e-29L); ev = bv * std::log(tv); if(ev > mv) return; } else { auto tv = rv * INVMUL64; const FT bv = -1. / m_; // Filter with fast log first CONST_IF(FLOGFILTER) { if(bv * flog(tv) * FT(.7) > mv) return; } ev = bv * std::log(tv); if(ev > mv) return; } ls_.reset(); ls_.seed(rv); uint64_t bi = 1; uint32_t idx = ls_.step(); for(;;) { if(mvt_.update(idx, ev)) { if(!ids_.empty()) { ids_.operator[](idx) = id; if(!idcounts_.empty()) idcounts_.operator[](idx) = 1; } mv = max(); } else if(!idcounts_.empty()) { if(id == ids_.operator[](idx)) ++idcounts_.operator[](idx); } if(bi == m_) return; rv = wy::wyhash64_stateless(&hid); const FT bv = -getbeta(bi++); CONST_IF(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; lrv |= wy::wyhash64_stateless(&rv); ev = std::fma(bv, std::log((lrv >> 32) * 1.2621774483536188887e-29L), ev); if(ev > mv) break; } else { const FT nv = rv * INVMUL64; CONST_IF(FLOGFILTER) { if(bv * flog(nv) * FT(.7) + ev >= mv) break; } ev = std::fma(bv, std::log(nv), ev); if(ev > mv) break; } idx = ls_.step(); } } bool operator==(const CSetSketch<FT> &o) const { return same_params(o) && std::equal(data(), data() + m_, o.data()); } bool same_params(const CSetSketch<FT> &o) const { return m_ == o.m_ && (ids().empty() == o.ids().empty()) && (idcounts().empty() == o.idcounts().empty()); } void merge(const CSetSketch<FT> &o) { if(!same_params(o)) throw std::runtime_error("Can't merge sets with differing parameters"); if(ids().empty()) { std::transform(data(), data() + m_, o.data(), data(), [](auto x, auto y) {return std::min(x, y);}); } else { for(size_t i = 0; i < size(); ++i) { if(!idcounts_.empty() && !ids_.empty() && ids_[i] == o.ids_[i]) { idcounts_[i] += o.idcounts_[i]; } else if(mvt_.update(i, o.data_[i])) { if(!ids_.empty()) ids_[i] = o.ids_[i]; if(!idcounts_.empty()) idcounts_[i] = o.idcounts_[i]; } } } total_updates_ += o.total_updates_; mycard_ = 
-1.; } CSetSketch &operator+=(const CSetSketch<FT> &o) {merge(o); return *this;} CSetSketch operator+(const CSetSketch<FT> &o) const { CSetSketch ret(*this); ret += o; return ret; } double jaccard_index(const CSetSketch<FT> &o) const { return shared_registers(o) / double(m_); } size_t shared_registers(const CSetSketch<FT> &o) const { CONST_IF(sizeof(FT) == 4) { return eq::count_eq((uint32_t *)data(), (uint32_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 8) { return eq::count_eq((uint64_t *)data(), (uint64_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 2) { return eq::count_eq((uint16_t *)data(), (uint16_t *)o.data(), m_); } auto optr = o.data(); return std::accumulate(data(), data() + m_, size_t(0), [&optr](size_t nshared, FT x) { return nshared + (x == *optr++); }); } void write(std::string s) const { gzFile fp = gzopen(s.data(), "w"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s + "for writing"); write(fp); gzclose(fp); } void read(std::string s) { gzFile fp = gzopen(s.data(), "r"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s); read(fp); gzclose(fp); } void read(gzFile fp) { gzread(fp, &m_, sizeof(m_)); FT mv; gzread(fp, &mv, sizeof(mv)); data_.reset(allocate(m_)); mvt_.assign(data_.get(), m_, mv); gzread(fp, (void *)data_.get(), m_ * sizeof(FT)); for(size_t i = 0;i < m_; ++i) mvt_.update(i, data_[i]); ls_.resize(m_); } int checkwrite(std::FILE *fp, const void *ptr, size_t nb) const { auto ret = ::write(::fileno(fp), ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } int checkwrite(gzFile fp, const void *ptr, size_t nb) const { auto ret = gzwrite(fp, ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } void write(std::FILE *fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); FT m = mvt_.mv(); checkwrite(fp, (const void *)&m, sizeof(m)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void write(gzFile fp) 
const { checkwrite(fp, (const void *)&m_, sizeof(m_)); FT m = mvt_.mv(); checkwrite(fp, (const void *)&m, sizeof(m)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void reset() {clear();} void clear() { mvt_.assign(data_.get(), m_, mvt_.mv()); total_updates_ = 0; if(ids_.size()) { std::fill(ids_.begin(), ids_.end(), uint64_t(0)); if(idcounts_.size()) std::fill(idcounts_.begin(), idcounts_.end(), uint32_t(0)); } mycard_ = -1.; } const std::vector<uint64_t> &ids() const {return ids_;} const std::vector<uint32_t> &idcounts() const {return idcounts_;} double union_size(const CSetSketch<FT> &o) const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += std::min(data_[i], o.data_[i]); return m_ / s; } auto alpha_beta(const CSetSketch<FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); return std::pair<double, double>{double(gtlt.first) / m_, double(gtlt.second) / m_}; } static constexpr double __union_card(double alph, double beta, double lhcard, double rhcard) { return std::max((lhcard + rhcard) / (2. - alph - beta), 0.); } double getcard() const { if(mycard_ < 0.) mycard_ = cardinality(); return mycard_; } double intersection_size(const CSetSketch<FT> &o) const { auto triple = alpha_beta_mu(o); return std::max(1. - (std::get<0>(triple) + std::get<1>(triple)), 0.) * std::get<2>(triple); } std::tuple<double, double, double> alpha_beta_mu(const CSetSketch<FT> &o) const { const auto ab = alpha_beta(o); auto mycard = getcard(), ocard = o.getcard(); if(ab.first + ab.second >= 1.) 
// They seem to be disjoint sets, use SetSketch (15) return {(mycard) / (mycard + ocard), ocard / (mycard + ocard), mycard + ocard}; return {ab.first, ab.second, __union_card(ab.first, ab.second, mycard, ocard)}; } double cardinality_estimate() const {return cardinality();} double cardinality() const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += data_[i]; return m_ / s; } static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg, size_t q) { long double b = std::exp(std::log((long double)maxreg / (long double)minreg) / (long double)q); return {FT(b), FT((long double)maxreg / b)}; } template<typename ResT=uint16_t> static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg) { if(maxreg < minreg) std::swap(maxreg, minreg); return optimal_parameters(maxreg, minreg, std::numeric_limits<ResT>::max()); } double containment_index(const CSetSketch<FT> &o) const { auto abm = alpha_beta_mu(o); auto lho = std::get<0>(abm); auto isf = std::max(1. 
- (lho + std::get<1>(abm)), 0.); return isf / (lho + isf); } }; template<typename FT=double, bool FLOGFILTER=true> class OPCSetSketch { static_assert(std::is_floating_point<FT>::value, "Must float"); // SetSketch 1 size_t m_; // Number of registers std::unique_ptr<FT[], detail::Deleter> data_; schism::Schismatic<uint32_t> div_; std::vector<uint64_t> ids_; std::vector<uint32_t> idcounts_; uint64_t total_updates_ = 0; mutable double mycard_ = -1.; static FT *allocate(size_t n) { FT *ret = nullptr; static constexpr size_t ALN = #if __AVX512F__ 64; #elif __AVX2__ 32; #else 16; #endif if(posix_memalign((void **)&ret, ALN, n * sizeof(FT))) throw std::bad_alloc(); return ret; } public: const FT *data() const {return data_.get();} FT *data() {return data_.get();} OPCSetSketch(size_t m, bool track_ids=false, bool track_counts=false, FT maxv=std::numeric_limits<FT>::max()): m_(m), div_(m_) { data_.reset(allocate(m_)); std::fill(data_.get(), &data_[m_], maxv); if(track_ids || track_counts) ids_.resize(m_); if(track_counts && !track_ids) { std::fprintf(stderr, "track_counts implies track_ids, enabling both\n"); track_ids = true; } if(track_counts) idcounts_.resize(m_); //generate_betas(); } OPCSetSketch(const OPCSetSketch &o): m_(o.m_), data_(allocate(o.m_)), div_(m_), ids_(o.ids_), idcounts_(o.idcounts_) { std::copy(o.data_[0], &o.data_[m_], data_.get()); //generate_betas(); } template<typename ResT=uint16_t> SetSketch<ResT, FT> to_setsketch(double b, double a, int64_t q=std::numeric_limits<ResT>::max() - 1) const { SetSketch<ResT, FT> ret(m_, b, a, q, ids_.size()); const double logbinv = 1. / std::log1p(b - 1.); for(size_t i = 0; i < m_; ++i) { ret.lowkh().update(i, std::max(int64_t(0), std::min(int64_t(q) + 1, static_cast<int64_t>((1. 
- std::log(data_[i] / a) * logbinv))))); } return ret; } OPCSetSketch &operator=(const OPCSetSketch &o) { if(size() != o.size()) { if(m_ < o.m_) data_.reset(allocate(o.m_)); m_ = o.m_; } std::copy(o.data(), &o.data()[m_], data()); if(o.ids_.size()) { ids_ = o.ids_; if(o.idcounts_.size()) idcounts_ = o.idcounts_; } total_updates_ = o.total_updates_; return *this; } OPCSetSketch(std::FILE *fp): div_(1) {read(fp);} OPCSetSketch(gzFile fp): div_(1) {read(fp);} OPCSetSketch(const std::string &s): div_(1) { read(s); } OPCSetSketch<FT> clone_like() const { return OPCSetSketch(m_, !ids().empty(), !idcounts().empty()); } FT min() const {return *std::min_element(data(), data() + m_);} FT max() const {return *std::max_element(data(), data() + m_);} size_t size() const {return m_;} FT &operator[](size_t i) {return data_[i];} const FT &operator[](size_t i) const {return data_[i];} void addh(uint64_t id) {update(id);} void add(uint64_t id) {update(id);} size_t total_updates() const {return total_updates_;} long double flog(long double x) const { __uint128_t yi; std::memcpy(&yi, &x, sizeof(x)); return yi * 3.7575583950764744255e-20L - 11356.176832703863597L; } double flog(double x) const { uint64_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 1.539095918623324e-16 - 709.0895657128241; } float flog(float x) const { uint32_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 8.2629582881927490e-8f - 88.02969186f; } bool update(const uint64_t id) { mycard_ = -1.; ++total_updates_; uint64_t hid = id; uint64_t rv = wy::wyhash64_stateless(&hid); FT ev; CONST_IF(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; const FT bv = -1. / m_; lrv |= wy::wyhash64_stateless(&rv); FT tv = static_cast<long double>((lrv >> 32) * 1.2621774483536188887e-29L); ev = bv * std::log(tv); } else { auto tv = rv * INVMUL64; const FT bv = -1. 
/ m_; // Filter with fast log first ev = bv * std::log(tv); } auto idx = div_.mod(rv); if(data_[idx] > ev) { data_[idx] = ev; if(!ids_.empty()) { ids_[idx] = id; if(!idcounts_.empty()) idcounts_[idx] = 1; } return true; } else if(data_[idx] == ev && !ids_.empty() && ids_[idx] == id && !idcounts_.empty()) ++idcounts_[idx]; return false; } bool operator==(const OPCSetSketch<FT> &o) const { return same_params(o) && std::equal(data(), data() + m_, o.data()); } bool same_params(const OPCSetSketch<FT> &o) const { return m_ == o.m_ && (ids().empty() == o.ids().empty()) && (idcounts().empty() == o.idcounts().empty()); } void merge(const OPCSetSketch<FT> &o) { if(!same_params(o)) throw std::runtime_error("Can't merge sets with differing parameters"); if(ids().empty()) { std::transform(data(), data() + m_, o.data(), data(), [](auto x, auto y) {return std::min(x, y);}); } else { for(size_t i = 0; i < size(); ++i) { if(!idcounts_.empty() && !ids_.empty() && ids_[i] == o.ids_[i]) { idcounts_[i] += o.idcounts_[i]; } else if(data_[i] < o.data_[i]) { data_[i] = o.data_[i]; if(!ids_.empty()) ids_[i] = o.ids_[i]; if(!idcounts_.empty()) idcounts_[i] = o.idcounts_[i]; } } } total_updates_ += o.total_updates_; mycard_ = -1.; } OPCSetSketch &operator+=(const OPCSetSketch<FT> &o) {merge(o); return *this;} OPCSetSketch operator+(const OPCSetSketch<FT> &o) const { OPCSetSketch ret(*this); ret += o; return ret; } double jaccard_index(const OPCSetSketch<FT> &o) const { return shared_registers(o) / double(m_); } size_t shared_registers(const OPCSetSketch<FT> &o) const { CONST_IF(sizeof(FT) == 4) { return eq::count_eq((uint32_t *)data(), (uint32_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 8) { return eq::count_eq((uint64_t *)data(), (uint64_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 2) { return eq::count_eq((uint16_t *)data(), (uint16_t *)o.data(), m_); } auto optr = o.data(); return std::accumulate(data(), data() + m_, size_t(0), [&optr](size_t nshared, FT x) { return nshared + (x 
== *optr++); }); } void write(std::string s) const { gzFile fp = gzopen(s.data(), "w"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s + "for writing"); write(fp); gzclose(fp); } void read(std::string s) { gzFile fp = gzopen(s.data(), "r"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s); read(fp); gzclose(fp); } void read(gzFile fp) { gzread(fp, &m_, sizeof(m_)); data_.reset(allocate(m_)); div_ = schism::Schismatic<uint32_t>(m_); gzread(fp, (void *)data_.get(), m_ * sizeof(FT)); } int checkwrite(std::FILE *fp, const void *ptr, size_t nb) const { auto ret = ::write(::fileno(fp), ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } int checkwrite(gzFile fp, const void *ptr, size_t nb) const { auto ret = gzwrite(fp, ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } void write(std::FILE *fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void write(gzFile fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void reset() {clear();} void clear() { std::fill_n(data_.get(), m_, std::numeric_limits<FT>::max()); total_updates_ = 0; if(ids_.size()) { std::fill(ids_.begin(), ids_.end(), uint64_t(0)); if(idcounts_.size()) std::fill(idcounts_.begin(), idcounts_.end(), uint32_t(0)); } mycard_ = -1.; } const std::vector<uint64_t> &ids() const {return ids_;} const std::vector<uint32_t> &idcounts() const {return idcounts_;} double union_size(const OPCSetSketch<FT> &o) const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += std::min(data_[i], o.data_[i]); return m_ / s; } auto alpha_beta(const OPCSetSketch<FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); return std::pair<double, double>{double(gtlt.first) / m_, double(gtlt.second) / m_}; 
} static constexpr double __union_card(double alph, double beta, double lhcard, double rhcard) { return std::max((lhcard + rhcard) / (2. - alph - beta), 0.); } double getcard() const { if(mycard_ < 0.) mycard_ = cardinality(); return mycard_; } double intersection_size(const OPCSetSketch<FT> &o) const { auto triple = alpha_beta_mu(o); return std::max(1. - (std::get<0>(triple) + std::get<1>(triple)), 0.) * std::get<2>(triple); } std::tuple<double, double, double> alpha_beta_mu(const OPCSetSketch<FT> &o) const { const auto ab = alpha_beta(o); auto mycard = getcard(), ocard = o.getcard(); if(ab.first + ab.second >= 1.) // They seem to be disjoint sets, use SetSketch (15) return {(mycard) / (mycard + ocard), ocard / (mycard + ocard), mycard + ocard}; return {ab.first, ab.second, __union_card(ab.first, ab.second, mycard, ocard)}; } double cardinality_estimate() const {return cardinality();} double cardinality() const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += data_[i]; return m_ / s; } static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg, size_t q) { long double b = std::exp(std::log((long double)maxreg / (long double)minreg) / (long double)q); return {FT(b), FT((long double)maxreg / b)}; } template<typename ResT=uint16_t> static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg) { if(maxreg < minreg) std::swap(maxreg, minreg); return optimal_parameters(maxreg, minreg, std::numeric_limits<ResT>::max()); } double containment_index(const OPCSetSketch<FT> &o) const { auto abm = alpha_beta_mu(o); auto lho = std::get<0>(abm); auto isf = std::max(1. 
- (lho + std::get<1>(abm)), 0.); return isf / (lho + isf); } }; template<typename FT> static inline double intersection_size(const OPCSetSketch<FT> &lhs, const OPCSetSketch<FT> &rhs) { return lhs.intersection_size(rhs); } template<typename FT> static inline double intersection_size(const CSetSketch<FT> &lhs, const CSetSketch<FT> &rhs) { return lhs.intersection_size(rhs); } template<typename ResT, typename FT> class SetSketch { static_assert(std::is_floating_point<FT>::value, "Must float"); static_assert(std::is_integral<ResT>::value, "Must be integral"); // Set sketch 1 size_t m_; // Number of registers FT a_; // Exponential parameter FT b_; // Base FT ainv_; FT logbinv_; using QType = std::common_type_t<ResT, int>; QType q_; std::unique_ptr<ResT[], detail::Deleter> data_; std::vector<uint64_t> ids_; // The IDs representing the sampled items. // Only used if SetSketch is fy::LazyShuffler ls_; minvt_t<ResT> lowkh_; std::vector<FT> lbetas_; // Cache Beta values * 1. / a mutable double mycard_ = -1.; static ResT *allocate(size_t n) { n = (n << 1) - 1; ResT *ret = nullptr; static constexpr size_t ALN = #if __AVX512F__ 64; #elif __AVX2__ 32; #else 16; #endif #if __cplusplus >= 201703L && defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) if((ret = static_cast<ResT *>(std::aligned_alloc(ALN, n * sizeof(ResT)))) == nullptr) #else if(posix_memalign((void **)&ret, ALN, n * sizeof(ResT))) #endif throw std::bad_alloc(); return ret; } FT getbeta(size_t idx) const { return FT(1.) / (m_ - idx); } public: const ResT *data() const {return data_.get();} ResT *data() {return data_.get();} auto &lowkh() {return lowkh_;} const auto &lowkh() const {return lowkh_;} SetSketch(size_t m, FT b, FT a, int q, bool track_ids = false): m_(m), a_(a), b_(b), ainv_(1./ a), logbinv_(1. 
/ std::log1p(b_ - 1.)), q_(q), ls_(m_), lowkh_(m) { ResT *p = allocate(m_); data_.reset(p); std::fill(p, p + m_, static_cast<ResT>(0)); lowkh_.assign(p, m_, b_); if(track_ids) ids_.resize(m_); lbetas_.resize(m_); for(size_t i = 0; i < m_; ++i) { lbetas_[i] = -ainv_ / (m_ - i); } } SetSketch(const SetSketch &o): m_(o.m_), a_(o.a_), b_(o.b_), ainv_(o.ainv_), logbinv_(o.logbinv_), q_(o.q_), ls_(m_), lowkh_(m_), lbetas_(o.lbetas_) { ResT *p = allocate(m_); data_.reset(p); lowkh_.assign(p, m_, b_); std::copy(o.data_.get(), &o.data_[2 * m_ - 1], p); } SetSketch(SetSketch &&o) = default; SetSketch(const std::string &s): ls_(1), lowkh_(1) { read(s); } size_t size() const {return m_;} double b() const {return b_;} double a() const {return a_;} ResT &operator[](size_t i) {return data_[i];} const ResT &operator[](size_t i) const {return data_[i];} int klow() const {return lowkh_.klow();} auto max() const {return lowkh_.max();} auto min() const {return lowkh_.min();} void addh(uint64_t id) {update(id);} void add(uint64_t id) {update(id);} void print() const { std::fprintf(stderr, "%zu = m, a %lg, b %lg, q %d\n", m_, double(a_), double(b_), int(q_)); } void update(const uint64_t id) { mycard_ = -1.; uint64_t hid = id; size_t bi = 0; uint64_t rv = wy::wyhash64_stateless(&hid); double ev = 0.; ls_.reset(); ls_.seed(rv); for(;;) { const auto ba = lbetas_[bi]; if(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; lrv |= wy::wyhash64_stateless(&rv); ev += ba * std::log((lrv >> 32) * 1.2621774483536188887e-29L); } else { ev += ba * std::log(rv * INVMUL64); } if(ev > lowkh_.explim()) return; const QType k = std::max(0, std::min(q_ + 1, static_cast<QType>((1. 
- std::log(ev) * logbinv_)))); if(k <= klow()) return; auto idx = ls_.step(); if(lowkh_.update(idx, k)) { if(!ids_.empty()) { ids_[idx] = id; } } if(++bi == m_) return; rv = wy::wyhash64_stateless(&hid); } } bool operator==(const SetSketch<ResT, FT> &o) const { return same_params(o) && std::equal(data(), data() + m_, o.data()); } bool same_params(const SetSketch<ResT,FT> &o) const { return std::tie(b_, a_, m_, q_) == std::tie(o.b_, o.a_, o.m_, o.q_); } double harmean(const SetSketch<ResT, FT> *ptr=static_cast<const SetSketch<ResT, FT> *>(nullptr)) const { static std::unordered_map<FT, std::vector<FT>> powers; auto it = powers.find(b_); if(it == powers.end()) { it = powers.emplace(b_, std::vector<FT>()).first; it->second.resize(q_ + 2); for(size_t i = 0; i < it->second.size(); ++i) { it->second[i] = std::pow(static_cast<long double>(b_), -static_cast<ptrdiff_t>(i)); } } std::vector<uint32_t> counts(q_ + 2); if(ptr) { for(size_t i = 0; i < m_; ++i) { ++counts[std::max(data_[i], ptr->data()[i])]; } } else { for(size_t i = 0; i < m_; ++i) { ++counts[data_[i]]; } } long double ret = 0.; for(ptrdiff_t i = lowkh_.klow(); i <= q_ + 1; ++i) { ret += counts[i] * it->second[i]; } return ret; } double jaccard_by_ix(const SetSketch<ResT, FT> &o) const { auto us = union_size(o); auto mycard = getcard(), ocard = o.getcard(); return (mycard + ocard - us) / us; } double union_size(const SetSketch<ResT, FT> &o) const { double num = m_ * (1. - 1. / b_) * logbinv_ * ainv_; return num / harmean(&o); } double cardinality_estimate() const {return cardinality();} double cardinality() const { double num = m_ * (1. - 1. 
/ b_) * logbinv_ * ainv_; return num / harmean(); } void merge(const SetSketch<ResT, FT> &o) { if(!same_params(o)) throw std::runtime_error("Can't merge sets with differing parameters"); std::transform(data(), data() + m_, o.data(), data(), [](auto x, auto y) {return std::max(x, y);}); mycard_ = -1.; } SetSketch &operator+=(const SetSketch<ResT, FT> &o) {merge(o); return *this;} SetSketch operator+(const SetSketch<ResT, FT> &o) const { SetSketch ret(*this); ret += o; return ret; } size_t shared_registers(const SetSketch<ResT, FT> &o) const { return eq::count_eq(data(), o.data(), m_); } std::pair<double, double> alpha_beta(const SetSketch<ResT, FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); double alpha = g_b(b_, double(gtlt.first) / m_); double beta = g_b(b_, double(gtlt.second) / m_); return {alpha, beta}; } static constexpr double __union_card(double alph, double beta, double lhcard, double rhcard) { return std::max((lhcard + rhcard) / (2. - alph - beta), 0.); } double getcard() const { if(mycard_ < 0.) mycard_ = cardinality(); return mycard_; } double jaccard_index(const SetSketch<ResT, FT> &o) const { if(!same_params(o)) throw std::invalid_argument("Parameters must match for comparison"); auto gtlt = eq::count_gtlt(data(), o.data(), m_); return jmle_simple<double>(gtlt.first, gtlt.second, m_, getcard(), o.getcard(), b_); } std::tuple<double, double, double> jointmle(const SetSketch<ResT, FT> &o) const { auto ji = jaccard_index(o); const auto y = 1. / (1. 
+ ji); double mycard = getcard(), ocard = o.getcard(); return {std::max(0., mycard - ocard * ji) * y, std::max(0., ocard - mycard * ji) * y, (mycard + ocard) * ji * y}; }; double jaccard_index_by_card(const SetSketch<ResT, FT> &o) const { auto tup = jointmle(o); return std::get<2>(tup) / (std::get<0>(tup) + std::get<1>(tup) + std::get<2>(tup)); } std::tuple<double, double, double> alpha_beta_mu(const SetSketch<ResT, FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); double alpha = g_b(b_, double(gtlt.first) / m_); double beta = g_b(b_, double(gtlt.second) / m_); double mycard = getcard(), ocard = o.getcard(); if(alpha + beta >= 1.) // They seem to be disjoint sets, use SetSketch (15) return {(mycard) / (mycard + ocard), ocard / (mycard + ocard), mycard + ocard}; return {alpha, beta, __union_card(alpha, beta, mycard, ocard)}; } void write(std::string s) const { gzFile fp = gzopen(s.data(), "w"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s + "for writing"); write(fp); gzclose(fp); } void read(std::string s) { gzFile fp = gzopen(s.data(), "r"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s); read(fp); gzclose(fp); } void read(gzFile fp) { gzread(fp, &m_, sizeof(m_)); gzread(fp, &a_, sizeof(a_)); gzread(fp, &b_, sizeof(b_)); gzread(fp, &q_, sizeof(q_)); ainv_ = 1.L / a_; logbinv_ = 1.L / std::log1p(b_ - 1.); data_.reset(allocate(m_)); lowkh_.assign(data_.get(), m_, b_); gzread(fp, (void *)data_.get(), m_ * sizeof(ResT)); std::fill(&data_[m_], &data_[2 * m_ - 1], ResT(0)); for(size_t i = 0;i < m_; ++i) lowkh_.update(i, data_[i]); ls_.resize(m_); } int checkwrite(std::FILE *fp, const void *ptr, size_t nb) const { auto ret = ::write(::fileno(fp), ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } int checkwrite(gzFile fp, const void *ptr, size_t nb) const { auto ret = gzwrite(fp, ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return 
ret; } void write(std::FILE *fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)&a_, sizeof(a_)); checkwrite(fp, (const void *)&b_, sizeof(b_)); checkwrite(fp, (const void *)&q_, sizeof(q_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(ResT)); } void write(gzFile fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)&a_, sizeof(a_)); checkwrite(fp, (const void *)&b_, sizeof(b_)); checkwrite(fp, (const void *)&q_, sizeof(q_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(ResT)); } void clear() { std::fill(data_.get(), &data_[m_ * 2 - 1], ResT(0)); mycard_ = -1.; } const std::vector<uint64_t> &ids() const {return ids_;} }; #ifndef M_E #define EULER_E 2.718281828459045 #else #define EULER_E M_E #endif struct NibbleSetS: public SetSketch<uint8_t> { NibbleSetS(size_t nreg, double b=EULER_E, double a=5e-4): SetSketch<uint8_t>(nreg, b, a, QV) {} static constexpr size_t QV = 14u; template<typename Arg> NibbleSetS(const Arg &arg): SetSketch<uint8_t>(arg) {} }; struct SmallNibbleSetS: public SetSketch<uint8_t> { SmallNibbleSetS(size_t nreg, double b=4., double a=1e-6): SetSketch<uint8_t>(nreg, b, a, QV) {} static constexpr size_t QV = 14u; template<typename Arg> SmallNibbleSetS(const Arg &arg): SetSketch<uint8_t>(arg) {} }; struct ByteSetS: public SetSketch<uint8_t, long double> { using Super = SetSketch<uint8_t, long double>; static constexpr size_t QV = 254u; ByteSetS(size_t nreg, long double b=1.2, long double a=20.): Super(nreg, b, a, QV) {} template<typename Arg> ByteSetS(const Arg &arg): Super(arg) {} }; struct ShortSetS: public SetSketch<uint16_t, long double> { static constexpr long double DEFAULT_B = 1.0005; static constexpr long double DEFAULT_A = .06; static constexpr size_t QV = 65534u; ShortSetS(size_t nreg, long double b=DEFAULT_B, long double a=DEFAULT_A): SetSketch<uint16_t, long double>(nreg, b, a, QV) {} template<typename Arg> ShortSetS(const Arg &arg): 
SetSketch<uint16_t, long double>(arg) {} }; struct WideShortSetS: public SetSketch<uint16_t, long double> { static constexpr long double DEFAULT_B = 1.0004; static constexpr long double DEFAULT_A = .06; static constexpr size_t QV = 65534u; WideShortSetS(size_t nreg, long double b=DEFAULT_B, long double a=DEFAULT_A): SetSketch<uint16_t, long double>(nreg, b, a, QV) {} template<typename...Args> WideShortSetS(Args &&...args): SetSketch<uint16_t, long double>(std::forward<Args>(args)...) {} }; struct EShortSetS: public SetSketch<uint16_t, long double> { using Super = SetSketch<uint16_t, long double>; static constexpr long double DEFAULT_B = 1.0006; static constexpr long double DEFAULT_A = .06; static constexpr size_t QV = 65534u; template<typename IT, typename OFT, typename=typename std::enable_if<std::is_integral<IT>::value && std::is_floating_point<OFT>::value>::type> EShortSetS(IT nreg, OFT b=DEFAULT_B, OFT a=DEFAULT_A): Super(nreg, b, a, QV) {} EShortSetS(size_t nreg): Super(nreg, DEFAULT_B, DEFAULT_A, QV) {} EShortSetS(int nreg): Super(nreg, DEFAULT_B, DEFAULT_A, QV) {} template<typename...Args> EShortSetS(Args &&...args): Super(std::forward<Args>(args)...) {} }; struct EByteSetS: public SetSketch<uint8_t, double> { static constexpr double DEFAULT_B = 1.09; static constexpr double DEFAULT_A = .08; static constexpr size_t QV = 254u; template<typename IT, typename=typename std::enable_if<std::is_integral<IT>::value>::type> EByteSetS(IT nreg, double b=DEFAULT_B, double a=DEFAULT_A): SetSketch<uint8_t, double>(nreg, b, a, QV) {} template<typename...Args> EByteSetS(Args &&...args): SetSketch<uint8_t, double>(std::forward<Args>(args)...) {} }; } // namespace setsketch using setsketch::EByteSetS; using setsketch::ByteSetS; using setsketch::ShortSetS; using setsketch::EShortSetS; using setsketch::WideShortSetS; using setsketch::NibbleSetS; using setsketch::SmallNibbleSetS; using setsketch::CSetSketch; using setsketch::SetSketch; } // namespace sketch #endif
relic_cp_phpe.c
/*
 * RELIC is an Efficient LIbrary for Cryptography
 * Copyright (c) 2014 RELIC Authors
 *
 * This file is part of RELIC. RELIC is legal property of its developers,
 * whose names are not listed here. Please refer to the COPYRIGHT file
 * for contact information.
 *
 * RELIC is free software; you can redistribute it and/or modify it under the
 * terms of the version 2.1 (or later) of the GNU Lesser General Public License
 * as published by the Free Software Foundation; or version 2.0 of the Apache
 * License as published by the Apache Software Foundation. See the LICENSE files
 * for more details.
 *
 * RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the LICENSE files for more details.
 *
 * You should have received a copy of the GNU Lesser General Public or the
 * Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
 * or <https://www.apache.org/licenses/>.
 */

/**
 * @file
 *
 * Implementation of Paillier's Homomorphic Probabilistic Encryption.
 *
 * @ingroup cp
 */

#include "relic.h"

/*============================================================================*/
/* Public definitions                                                         */
/*============================================================================*/

/**
 * Generates a Paillier key pair.
 *
 * @param[out] pub		- the public modulus n = p * q.
 * @param[out] prv		- the private key (p, q, and, under CP_CRT, the
 *						  precomputed CRT helpers dp, dq, qi).
 * @param[in] bits		- the target bit length of the modulus n; each prime
 *						  is generated with bits/2 bits.
 * @return RLC_OK (this routine does not signal failure itself).
 */
int cp_phpe_gen(bn_t pub, phpe_t prv, int bits) {
	int result = RLC_OK;

	/* Generate primes p and q of equivalent length.  The loop rejects the
	 * (astronomically unlikely) case p == q, which would break the CRT. */
	do {
		bn_gen_prime(prv->p, bits / 2);
		bn_gen_prime(prv->q, bits / 2);
	} while (bn_cmp(prv->p, prv->q) == RLC_EQ);

	/* Compute n = pq and l = \phi(n). */
	bn_mul(prv->n, prv->p, prv->q);

#ifdef CP_CRT
	/* Fix g = n + 1 (the standard Paillier generator choice); pub is used
	 * here as scratch for g before it finally receives n below. */
	bn_add_dig(pub, prv->n, 1);

	/* Precompute dp = 1 / L(g^(p-1) mod p^2) mod p, where L(x) = (x-1)/p.
	 * NOTE: p is decremented in place to serve as the exponent (p-1) and
	 * restored afterwards, so the statement order below is load-bearing. */
	bn_sqr(prv->dp, prv->p);
	bn_sub_dig(prv->p, prv->p, 1);
	bn_mxp(prv->dp, pub, prv->p, prv->dp);
	bn_sub_dig(prv->dp, prv->dp, 1);
	bn_div(prv->dp, prv->dp, prv->p);

	/* Precompute dq = 1 / L(g^(q-1) mod q^2) mod q, where L(x) = (x-1)/q.
	 * Same in-place decrement/restore trick on q. */
	bn_sqr(prv->dq, prv->q);
	bn_sub_dig(prv->q, prv->q, 1);
	bn_mxp(prv->dq, pub, prv->q, prv->dq);
	bn_sub_dig(prv->dq, prv->dq, 1);
	bn_div(prv->dq, prv->dq, prv->q);

	/* Restore p and q. */
	bn_add_dig(prv->p, prv->p, 1);
	bn_add_dig(prv->q, prv->q, 1);
	/* Invert the L(...) values modulo the restored primes. */
	bn_mod_inv(prv->dp, prv->dp, prv->p);
	bn_mod_inv(prv->dq, prv->dq, prv->q);
	/* qInv = q^(-1) mod p, used by Garner's CRT recombination in dec. */
	bn_mod_inv(prv->qi, prv->q, prv->p);
#endif
	/* Publish the modulus. */
	bn_copy(pub, prv->n);
	return result;
}

/**
 * Encrypts a message with Paillier's probabilistic scheme.
 *
 * Computes c = g^m * r^n mod n^2 with g = n + 1 and fresh randomness r.
 *
 * @param[out] c		- the resulting ciphertext (an element of Z_{n^2}).
 * @param[in] m			- the plaintext; must fit in bn_bits(pub) bits.
 * @param[in] pub		- the public modulus n.
 * @return RLC_OK on success, RLC_ERR if pub is NULL, m is too large, or an
 * allocation/arithmetic error is raised.
 */
int cp_phpe_enc(bn_t c, bn_t m, bn_t pub) {
	bn_t g, r, s;
	int result = RLC_OK;

	bn_null(g);
	bn_null(r);
	bn_null(s);

	if (pub == NULL || bn_bits(m) > bn_bits(pub)) {
		return RLC_ERR;
	}

	RLC_TRY {
		bn_new(g);
		bn_new(r);
		bn_new(s);

		/* Generate r in Z_n^*.  (bn_rand_mod draws from [0, n); a value not
		 * coprime to n is negligible-probability for a valid modulus.) */
		bn_rand_mod(r, pub);
		/* Compute c = (g^m)(r^n) mod n^2. */
		bn_add_dig(g, pub, 1);	/* g = n + 1 */
		bn_sqr(s, pub);			/* s = n^2 */
		bn_mxp(c, g, m, s);		/* c = g^m mod n^2 */
		bn_mxp(r, r, pub, s);	/* r = r^n mod n^2 */
		bn_mul(c, c, r);
		bn_mod(c, c, s);
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(g);
		bn_free(r);
		bn_free(s);
	}
	return result;
}

/**
 * Decrypts a Paillier ciphertext.
 *
 * Without CP_CRT: m = L(c^l mod n^2) * l^(-1) mod n, with l = (p-1)(q-1) and
 * L(x) = (x-1)/n.  With CP_CRT: decrypts modulo p and q separately using the
 * precomputed dp/dq and recombines via Garner's method (optionally running
 * the two half-decryptions in parallel OpenMP sections).
 *
 * @param[out] m		- the recovered plaintext.
 * @param[in] c			- the ciphertext; must fit in 2 * bn_bits(n) bits.
 * @param[in] prv		- the private key.
 * @return RLC_OK on success, RLC_ERR if prv is NULL, c is out of range, or an
 * allocation/arithmetic error is raised.
 */
int cp_phpe_dec(bn_t m, bn_t c, phpe_t prv) {
	bn_t s, t, u, v;
	int result = RLC_OK;

	if (prv == NULL || bn_bits(c) > 2 * bn_bits(prv->n)) {
		return RLC_ERR;
	}

	bn_null(s);
	bn_null(t);
	bn_null(u);
	bn_null(v);

	RLC_TRY {
		bn_new(s);
		bn_new(t);
		bn_new(u);
		bn_new(v);
#if !defined(CP_CRT)
		/* l = (p - 1)(q - 1). */
		bn_sub_dig(s, prv->p, 1);
		bn_sub_dig(t, prv->q, 1);
		bn_mul(s, s, t);

		/* Compute (c^l mod n^2) * u mod n. */
		bn_sqr(t, prv->n);
		bn_mxp(m, c, s, t);
		bn_sub_dig(m, m, 1);
		bn_div(m, m, prv->n);		/* m = L(c^l mod n^2) */
		bn_mod_inv(t, s, prv->n);	/* t = l^(-1) mod n */
		bn_mul(m, m, t);
		bn_mod(m, m, prv->n);
#else
#if MULTI == OPENMP
		omp_set_num_threads(CORES);
#pragma omp parallel copyin(core_ctx) firstprivate(c, prv)
		{
#pragma omp sections
			{
#pragma omp section
				{
#endif
		/* Compute m_p = L(c^(p-1) mod p^2) * dp mod p. */
		bn_sub_dig(t, prv->p, 1);
		bn_sqr(s, prv->p);
		bn_mxp(s, c, t, s);
		bn_sub_dig(s, s, 1);
		bn_div(s, s, prv->p);
		bn_mul(s, s, prv->dp);
		bn_mod(s, s, prv->p);
#if MULTI == OPENMP
				}
#pragma omp section
				{
#endif
		/* Compute m_q = L(c^(q-1) mod q^2) * dq mod q. */
		bn_sub_dig(v, prv->q, 1);
		bn_sqr(u, prv->q);
		bn_mxp(u, c, v, u);
		bn_sub_dig(u, u, 1);
		bn_div(u, u, prv->q);
		bn_mul(u, u, prv->dq);
		bn_mod(u, u, prv->q);
#if MULTI == OPENMP
				}
			}
		}
#endif
		/* Garner recombination: m = (m_p - m_q) mod p. */
		bn_sub(m, s, u);
		while (bn_sign(m) == RLC_NEG) {
			bn_add(m, m, prv->p);
		}
		bn_mod(m, m, prv->p);
		/* m1 = qInv(m_p - m_q) mod p. */
		bn_mul(m, m, prv->qi);
		bn_mod(m, m, prv->p);
		/* m = m2 + m1 * q. */
		bn_mul(m, m, prv->q);
		bn_add(m, m, u);
		bn_mod(m, m, prv->n);
#endif
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(s);
		bn_free(t);
		bn_free(u);
		bn_free(v);
	}
	return result;
}
GB_subassign_04.c
//------------------------------------------------------------------------------ // GB_subassign_04: C(I,J) += A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 04: C(I,J) += A ; using S // M: NULL // Mask_comp: false // C_replace: false // accum: present // A: matrix // S: constructed // C: not bitmap: use GB_bitmap_assign instead // A: any sparsity structure. #include "GB_subassign_methods.h" GrB_Info GB_subassign_04 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_BinaryOp accum, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap GB_GET_A ; GB_GET_S ; GB_GET_ACCUM ; //-------------------------------------------------------------------------- // Method 04: C(I,J) += A ; using S //-------------------------------------------------------------------------- // Time: Close to Optimal. 
Every entry in A must be visited, and the // corresponding entry in S must then be found. Time for this phase is // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S)) // time. This method simply traverses all of A+S (like GB_add for // computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)). // The only difference is that the traversal of A+S can terminate if A is // exhausted. Entries in S but not A do not actually require any work // (unlike Method 02, which must visit all entries in A+S). // Method 02 and Method 04 are somewhat similar. They differ on how C is // modified when the entry is present in S but not A. // TODO: phase2 of Method 02 and 04 are identical and could be // done in a single function. // Compare with Method 16, which computes C(I,J)<!M> += A, using S. //-------------------------------------------------------------------------- // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (A_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all A+S GB_SUBASSIGN_TWO_SLICE (A, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // phase1: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ; 
//------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (Sfound && !Afound) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && Afound) { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 
1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) task_pending++ ; GB_NEXT (A) ; } else { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // List A (:,j) has entries. List S (:,j) exhausted. task_pending += (pA_end - pA) ; } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (A_is_bitmap) { //---------------------------------------------------------------------- // phase2: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of 
S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT (Ax +(pA*asize)) ; GB_NEXT (A) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC 
= GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT (Ax +(pA*asize)) ; GB_NEXT (A) ; } else { GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // while list A (:,j) has entries. List S (:,j) exhausted. while (pA < pA_end) { // ----[. A 1]---------------------------------------------- // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iA = GBI (Ai, pA, Avlen) ; int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT (Ax +(pA*asize)) ; GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
2d_array_ptr_v2.c
#include <stdlib.h>
#include <stdio.h> /* FIX: printf was used without a declaration (invalid since C99) */
#include <omp.h>

/*
 * Builds a 2x2 "2-D array" as an array of two row pointers into one
 * contiguous 4-int buffer, then writes and prints it from inside an
 * OpenMP parallel region.
 *
 * NOTE(review): every thread in the parallel region performs the same
 * unsynchronized writes and the same printf; this is a data race,
 * presumably intentional (race-detector benchmark input) -- left as-is.
 */
int main()
{
    int *data = malloc(sizeof(int) * 4);
    int **arr = malloc(sizeof(int *) * 2);
    if (data == NULL || arr == NULL) {
        /* free(NULL) is a no-op, so no guards needed */
        free(data);
        free(arr);
        return EXIT_FAILURE;
    }
    arr[0] = data;      /* row 0 -> data[0..1] */
    arr[1] = data + 2;  /* row 1 -> data[2..3] */

#pragma omp parallel
    {
        arr[0][0] = 0;
        arr[0][1] = 1;
        arr[1][0] = 2;
        arr[1][1] = 3;
        printf("[%d, %d],\n[%d, %d]\n",
               arr[0][0], arr[0][1], arr[1][0], arr[1][1]);
    }

    free(data);
    free(arr);
    return 0;
}
update_ops_named_CZ.c
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _USE_SIMD
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif

/*
 * FIX: the dispatcher CZ_gate() calls the implementations below before
 * their definitions; the previous code relied on implicit function
 * declarations (removed from the language in C99). Declare them here.
 */
void CZ_gate_single_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#ifdef _OPENMP
void CZ_gate_parallel_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#endif
#ifdef _USE_SIMD
void CZ_gate_single_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#ifdef _OPENMP
void CZ_gate_parallel_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
#endif
#endif

/*
 * Apply a controlled-Z gate to the state vector of dimension dim.
 * Dispatches to a SIMD and/or OpenMP variant depending on build flags;
 * with OpenMP, small states (dim < 2^13) use the serial path to avoid
 * parallelization overhead.
 */
void CZ_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
#ifdef _USE_SIMD
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        CZ_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
    } else {
        CZ_gate_parallel_simd(control_qubit_index, target_qubit_index, state, dim);
    }
#else
    CZ_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        CZ_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
    } else {
        CZ_gate_parallel_unroll(control_qubit_index, target_qubit_index, state, dim);
    }
#else
    CZ_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
#endif
#endif
}

/*
 * Serial CZ: negate the amplitude of every basis state with both the
 * control and target bits set. The low/mid/high masks re-insert the two
 * qubit bit positions into the compressed loop index; when neither qubit
 * is qubit 0, consecutive pairs of target indices are contiguous, so the
 * inner loop is unrolled by 2.
 */
void CZ_gate_single_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;

    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;

    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);

    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            state[basis_index] *= -1;
        }
    } else {
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            state[basis_index] *= -1;
            state[basis_index + 1] *= -1;
        }
    }
}

#ifdef _OPENMP
/* OpenMP-parallel version of CZ_gate_single_unroll (same index math). */
void CZ_gate_parallel_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;

    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;

    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);

    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            state[basis_index] *= -1;
        }
    } else {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            state[basis_index] *= -1;
            state[basis_index + 1] *= -1;
        }
    }
}
#endif

#ifdef _USE_SIMD
/*
 * AVX version: when both target indices in a pair are contiguous, the two
 * complex amplitudes occupy 4 consecutive doubles, which are negated with
 * one 256-bit multiply.
 */
void CZ_gate_single_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;

    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;

    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);

    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            state[basis_index] *= -1;
        }
    } else {
        __m256d minus_one = _mm256_set_pd(-1, -1, -1, -1);
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            double *ptr = (double *)(state + basis_index);
            __m256d data = _mm256_loadu_pd(ptr);
            data = _mm256_mul_pd(data, minus_one);
            _mm256_storeu_pd(ptr, data);
        }
    }
}

#ifdef _OPENMP
/* OpenMP-parallel version of CZ_gate_single_simd (same index math). */
void CZ_gate_parallel_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 4;

    const ITYPE target_mask = 1ULL << target_qubit_index;
    const ITYPE control_mask = 1ULL << control_qubit_index;

    const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
    const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
    const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
    const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
    const ITYPE low_mask = min_qubit_mask - 1;
    const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
    const ITYPE high_mask = ~(max_qubit_mask - 1);

    const ITYPE mask = target_mask + control_mask;
    ITYPE state_index = 0;
    if (target_qubit_index == 0 || control_qubit_index == 0) {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; ++state_index) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            state[basis_index] *= -1;
        }
    } else {
        __m256d minus_one = _mm256_set_pd(-1, -1, -1, -1);
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index = (state_index & low_mask)
                + ((state_index & mid_mask) << 1)
                + ((state_index & high_mask) << 2)
                + mask;
            double *ptr = (double *)(state + basis_index);
            __m256d data = _mm256_loadu_pd(ptr);
            data = _mm256_mul_pd(data, minus_one);
            _mm256_storeu_pd(ptr, data);
        }
    }
}
#endif
#endif

/* NOTE(review): the large block of commented-out legacy implementations
 * (CZ_gate_old_single/parallel, CZ_gate_single/parallel) was removed;
 * version control preserves it. */
GB_unop__tan_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__tan_fp32_fp32
// op(A') function: GB_unop_tran__tan_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = tanf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = tanf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = tanf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TAN || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = tanf (aij) entrywise over anz entries of Ax, writing the
// result into Cx (which may alias Ax).  The dense and bitmap cases are
// handled separately; both are parallelized with a static OpenMP schedule.

GrB_Info GB_unop_apply__tan_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry p in 0..anz-1 is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a plain parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tanf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tanf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by textual inclusion of
// GB_unop_transpose.c, which expands the GB_* macros defined above.

GrB_Info GB_unop_tran__tan_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
arm_device.h
#ifndef ANAKIN2_SABER_ARM_DEVICES_H #define ANAKIN2_SABER_ARM_DEVICES_H #include <stdio.h> #include <vector> #include "device.h" #ifdef PLATFORM_ANDROID #include <sys/syscall.h> #include <unistd.h> #define __NCPUBITS__ (8 * sizeof (unsigned long)) #define __CPU_SET(cpu, cpusetp) \ ((cpusetp)->mask_bits[(cpu) / __NCPUBITS__] |= (1UL << ((cpu) % __NCPUBITS__))) #define __CPU_ZERO(cpusetp) \ memset((cpusetp), 0, sizeof(cpu_set_t)) #endif #if __APPLE__ #include "TargetConditionals.h" #if TARGET_OS_IPHONE #include <sys/types.h> #include <sys/sysctl.h> #include <mach/machine.h> #define __IOS__ #endif #endif #ifdef USE_ARM_PLACE static int arm_get_cpucount() { #ifdef PLATFORM_ANDROID // get cpu count from /proc/cpuinfo FILE* fp = fopen("/proc/cpuinfo", "rb"); if (!fp) { return 1; } int count = 0; char line[1024]; while (!feof(fp)) { char* s = fgets(line, 1024, fp); if (!s) { break; } if (memcmp(line, "processor", 9) == 0) { count++; } } fclose(fp); if (count < 1) { count = 1; } return count; #elif __IOS__ int count = 0; size_t len = sizeof(count); sysctlbyname("hw.ncpu", &count, &len, NULL, 0); if (count < 1) { count = 1; } return count; #else return 1; #endif } static int arm_get_meminfo() { #ifdef PLATFORM_ANDROID // get cpu count from /proc/cpuinfo FILE* fp = fopen("/proc/meminfo", "rb"); if (!fp) { return 1; } int memsize = 0; char line[1024]; while (!feof(fp)) { char* s = fgets(line, 1024, fp); if (!s) { break; } sscanf(s, "MemTotal: %d kB", &memsize); } fclose(fp); return memsize; #elif __IOS__ // to be implemented return 0; #endif } #ifdef PLATFORM_ANDROID static int get_max_freq_khz(int cpuid) { // first try, for all possible cpu char path[256]; snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpufreq/stats/cpu%d/time_in_state",\ cpuid); FILE* fp = fopen(path, "rb"); if (!fp) { // second try, for online cpu snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/cpufreq/stats/time_in_state",\ cpuid); fp = fopen(path, "rb"); if (!fp) { // third try, for 
online cpu snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq",\ cpuid); fp = fopen(path, "rb"); if (!fp) { return -1; } int max_freq_khz = -1; fscanf(fp, "%d", &max_freq_khz); fclose(fp); return max_freq_khz; } } int max_freq_khz = 0; while (!feof(fp)) { int freq_khz = 0; int nscan = fscanf(fp, "%d %*d", &freq_khz); if (nscan != 1) { break; } if (freq_khz > max_freq_khz) { max_freq_khz = freq_khz; } } fclose(fp); return max_freq_khz; } static int arm_sort_cpuid_by_max_frequency(int cpu_count, std::vector<int>& cpuids, \ std::vector<int>& cpu_freq, std::vector<int>& cluster_ids) { //const int cpu_count = cpuids.size(); if (cpu_count == 0) { return 0; } //std::vector<int> cpu_max_freq_khz; cpuids.resize(cpu_count); cpu_freq.resize(cpu_count); cluster_ids.resize(cpu_count); for (int i = 0; i < cpu_count; i++) { int max_freq_khz = get_max_freq_khz(i); //printf("%d max freq = %d khz\n", i, max_freq_khz); cpuids[i] = i; cpu_freq[i] = max_freq_khz / 1000; } // sort cpuid as big core first // simple bubble sort /* for (int i = 0; i < cpu_count; i++) { for (int j = i+1; j < cpu_count; j++) { if (cpu_freq[i] < cpu_freq[j]) { // swap int tmp = cpuids[i]; cpuids[i] = cpuids[j]; cpuids[j] = tmp; tmp = cpu_freq[i]; cpu_freq[i] = cpu_freq[j]; cpu_freq[j] = tmp; } } }*/ // SMP int mid_max_freq_khz = (cpu_freq.front() + cpu_freq.back()) / 2; //if (mid_max_freq_khz == cpu_freq.back()) // return 0; for (int i = 0; i < cpu_count; i++) { if (cpu_freq[i] >= mid_max_freq_khz) { cluster_ids[i] = 0; } else{ cluster_ids[i] = 1; } } return 0; } #endif // __ANDROID__ #ifdef __IOS__ static int sort_cpuid_by_max_frequency(int cpu_count, std::vector<int>& cpuids, \ std::vector<int>& cpu_freq, std::vector<int>& cluster_ids){ if (cpu_count == 0) { return 0; } cpuids.resize(cpu_count); cpu_freq.resize(cpu_count); cluster_ids.resize(cpu_count); for (int i = 0; i < cpu_count; ++i) { cpuids[i] = i; cpu_freq[i] = 1000; cluster_ids[i] = 0; } } #endif #ifdef 
PLATFORM_ANDROID static int set_sched_affinity(const std::vector<int>& cpuids) { // cpu_set_t definition // ref http://stackoverflow.com/questions/16319725/android-set-thread-affinity typedef struct { unsigned long mask_bits[1024 / __NCPUBITS__]; }cpu_set_t; // set affinity for thread pid_t pid = gettid(); cpu_set_t mask; __CPU_ZERO(&mask); for (int i = 0; i < (int)cpuids.size(); i++) { __CPU_SET(cpuids[i], &mask); } int syscallret = syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask); if (syscallret) { LOG(ERROR) << "syscall error " << syscallret; return -1; } return 0; } static int set_cpu_affinity(const std::vector<int>& cpuids){ #ifdef USE_OPENMP int num_threads = cpuids.size(); omp_set_num_threads(num_threads); std::vector<int> ssarets(num_threads, 0); #pragma omp parallel for for (int i = 0; i < num_threads; i++) { ssarets[i] = set_sched_affinity(cpuids); } for (int i = 0; i < num_threads; i++) { if (ssarets[i] != 0) { LOG(ERROR)<<"set cpu affinity failed, cpuID: " << cpuids[i]; return -1; } } #else std::vector<int> cpuid1; cpuid1.push_back(cpuids[0]); int ssaret = set_sched_affinity(cpuid1); if (ssaret != 0) { LOG(ERROR)<<"set cpu affinity failed, cpuID: " << cpuids[0]; return -1; } #endif } #endif //PLATFORN_ANDROID #endif //USE_ARM_PLACE #endif //ANAKIN2_SABER_ARM_DEVICES_H
dvr_smd.h
#ifndef METHODS_DVR_SMD_H
#define METHODS_DVR_SMD_H

// DVR + semiclassical moment dynamics (SMD) hybrid method: a dvr::State carries
// the positional wavefunction while a table of phase-space moment expectations
// (indexed by exponent tuples up to `grade`) is propagated via Moyal brackets.

namespace method {

namespace dvr_smd {

namespace details {

// Monte-Carlo style expectation of `function` over weighted phase-space points,
// with each coordinate divided by its `scaling` entry first (moments are stored
// in scaled/dimensionless coordinates).
template<typename Function>
auto expectation(const Function & function,
                 const arma::mat & points,
                 const arma::vec & weights,
                 const arma::vec & scaling) {

  const arma::mat scaled_points = arma::diagmat(1 / scaling) * points;

  auto result = at(function, scaled_points);

  return arma::dot(result, weights) / arma::sum(weights);
}

// Convenience overload taking the points/weights from a cwa::State.
template<typename Function>
auto expectation(const Function & function,
                 const cwa::State & state,
                 const arma::vec & scaling) {
  return expectation(function, state.points, state.weights, scaling);
}

// Evaluate one polynomial term: if every exponent is below `grade` the value is
// looked up in the precomputed `expectations` table (via the index `table`),
// otherwise it is integrated directly over the state's phase-space points.
template<typename T>
auto at_search(const math::polynomial::Term <T> & term,
               const cwa::State & state,
               const arma::vec & expectations,
               const arma::uvec & table,
               const arma::vec & scaling,
               const arma::uword grade) {

  if ((arma::uword) arma::max(term.exponents) >= grade) {
    // term too high-order for the table -> direct quadrature
    return expectation(term, state.points, state.weights, scaling);
  } else {
    const arma::uvec indices = arma::conv_to<arma::uvec>::from(term.exponents);
    return term.coef * expectations(math::space::indices_to_index(indices, table));
  }
}

// Sum of at_search over every term of the polynomial.
template<typename T>
auto at_search(const math::Polynomial <T> & polynomial,
               const cwa::State & state,
               const arma::vec & expectations,
               const arma::uvec & table,
               const arma::vec & scaling,
               const arma::uword grade) {

  auto result = at_search(polynomial.term(0), state, expectations, table, scaling, grade);

  for (arma::uword i = 1; i < polynomial.coefs.n_elem; i++) {
    result += at_search(polynomial.term(i), state, expectations, table, scaling, grade);
  }

  return result;
}

} // namespace details

// Combined state: a DVR wavefunction plus the moment-expectation table and the
// bookkeeping needed to address it (index table, scaling, x/p index caches).
struct State {
public:
  dvr::State dvr_state;            // positional-space wavefunction
  arma::vec masses;                // one mass per positional dimension
  arma::uvec momentum_space_grid;  // grid sizes of the momentum half
  arma::mat momentum_space_range;  // ranges of the momentum half
  arma::uword grade;               // max exponent per dimension in the moment table
  arma::uvec expectation_table;    // strides for exponent-tuple -> flat index
  arma::vec expectations;          // flat table of moment expectations
  arma::uvec positional_indices;   // flat indices of the <x_i> entries
  arma::uvec momentum_indices;     // flat indices of the <p_i> entries
  arma::vec scaling;               // per-dimension coordinate scaling (range widths)

  // Establish an easy way to construct your State
  //
  // `grid`/`range` describe the full 2N-dimensional phase space (first half
  // position, second half momentum); the DVR part only receives the first half.
  template<typename WaveFunction>
  State(const WaveFunction & initial,
        const arma::uvec & grid,
        const arma::mat & range,
        const arma::vec & masses,
        const arma::uword grade) :
      dvr_state(initial, grid.rows(0, grid.n_elem / 2 - 1),
                range.rows(0, grid.n_elem / 2 - 1), masses),
      masses(masses),
      momentum_space_grid(grid.rows(grid.n_elem / 2, grid.n_elem - 1)),
      momentum_space_range(range.rows(grid.n_elem / 2, grid.n_elem - 1)),
      grade(grade),
      expectation_table(math::space::grids_to_table(
          grade * arma::ones<arma::uvec>(grid.n_elem))) {

    if (grid.n_rows != range.n_rows) {
      throw Error("Different dimension between the grid and the range");
    }

    if (grid.n_rows != 2 * masses.n_rows) {
      throw Error("Different dimension between the grid and the masses");
    }

    const arma::uword dimension = grid.n_elem;
    const arma::uword length = std::pow(grade, dimension);

    this->expectations = arma::vec(length);
    this->positional_indices = arma::uvec(dimension / 2);
    this->momentum_indices = arma::uvec(dimension / 2);

    const arma::vec diff = range.col(1) - range.col(0);
    this->scaling = diff;

    // indices check in: cache the flat index of each first-order moment
    // <x_i> and <p_i> so they can be read back without re-deriving indices.
    #pragma omp parallel for
    for (arma::uword i = 0; i < dimension / 2; i++) {
      arma::uvec X = arma::zeros<arma::uvec>(dimension);
      arma::uvec P = arma::zeros<arma::uvec>(dimension);
      X(i) = 1;
      P(i + dimension / 2) = 1;
      this->positional_indices(i) =
          math::space::indices_to_index(X, this->expectation_table);
      this->momentum_indices(i) =
          math::space::indices_to_index(P, this->expectation_table);
    }

    const auto transformed = initial.wigner_transform();
    const cwa::State initial_cwa(transformed, grid, range, masses);

    // expectations check in: fill the full moment table from the initial
    // Wigner distribution.
    for (arma::uword i = 0; i < length; i++) {
      const lvec indices = arma::conv_to<lvec>::from(
          math::space::index_to_indices(i, this->expectation_table));
      this->expectations(i) =
          details::expectation(math::polynomial::Term(1.0, indices),
                               initial_cwa, this->scaling);
    }
  }

  // Unit-mass convenience constructor.
  //
  // NOTE(review): the dvr_state member initializer passes the *member* `masses`,
  // but members are initialized in declaration order and dvr_state precedes
  // masses -- so dvr_state sees a not-yet-constructed masses vector (undefined
  // behavior).  It likely should pass arma::ones<arma::vec>(grid.n_rows / 2)
  // directly, as the masses member initializer does.
  // NOTE(review): expectation_table here is sized with grid.n_rows while the
  // constructor above uses grid.n_elem -- equal for a uvec, but the
  // inconsistency is worth unifying.
  template<typename WaveFunction>
  State(const WaveFunction & initial,
        const arma::uvec & grid,
        const arma::mat & range,
        const arma::uword grade) :
      dvr_state(initial, grid.rows(0, grid.n_elem / 2 - 1),
                range.rows(0, grid.n_elem / 2 - 1), masses),
      masses(arma::ones<arma::vec>(grid.n_rows / 2)),
      momentum_space_grid(grid.rows(grid.n_elem / 2, grid.n_elem - 1)),
      momentum_space_range(range.rows(grid.n_elem / 2, grid.n_elem - 1)),
      grade(grade),
      expectation_table(math::space::grids_to_table(
          grade * arma::ones<arma::uvec>(grid.n_rows))) {

    if (grid.n_rows != range.n_rows) {
      throw Error("Different dimension between the grid and the range");
    }

    if (grid.n_rows != 2 * masses.n_rows) {
      throw Error("Different dimension between the grid and the masses");
    }

    const auto dimension = grid.n_elem;
    const auto length = std::pow(grade, dimension);

    this->expectations = arma::vec(length);
    this->positional_indices = arma::uvec(dimension / 2);
    this->momentum_indices = arma::uvec(dimension / 2);

    const arma::vec diff = range.col(1) - range.col(0);
    this->scaling = diff;

    // exponents check in: cache flat indices of the first-order moments
    // (serial here, unlike the parallel loop in the other constructor).
    for (arma::uword i = 0; i < dimension / 2; i++) {
      arma::uvec X = arma::zeros<arma::uvec>(dimension);
      arma::uvec P = arma::zeros<arma::uvec>(dimension);
      X(i) = 1;
      P(i + dimension / 2) = 1;
      this->positional_indices(i) =
          math::space::indices_to_index(X, this->expectation_table);
      this->momentum_indices(i) =
          math::space::indices_to_index(P, this->expectation_table);
    }

    const auto transformed = initial.wigner_transform(
        this->momentum_space_grid, this->momentum_space_range
    );
    const cwa::State initial_cwa(transformed, grid, range, masses);

    // expectations check in
    for (arma::uword i = 0; i < length; i++) {
      const lvec indices = arma::conv_to<lvec>::from(
          math::space::index_to_indices(i, this->expectation_table));
      this->expectations(i) =
          details::expectation(math::polynomial::Term(1.0, indices),
                               initial_cwa, this->scaling);
    }
  }

  // Member-wise constructor used by Operator::operator() to assemble a
  // derivative State without recomputing any tables.
  inline State(const dvr::State & dvr_state,
               const arma::vec & masses,
               const arma::uvec & grid,
               const arma::mat & range,
               const arma::uvec & expectation_table,
               const arma::vec & expectations,
               const arma::uvec & positional_indices,
               const arma::uvec & momentum_indices,
               const arma::vec & scaling,
               const arma::uword grade) :
      dvr_state(dvr_state),
      masses(masses),
      momentum_space_grid(grid),
      momentum_space_range(range),
      grade(grade),
      expectation_table(expectation_table),
      expectations(expectations),
      positional_indices(positional_indices),
      momentum_indices(momentum_indices),
      scaling(scaling) {}

  // Copy constructor (member-wise).
  inline State(const State & state) :
      dvr_state(state.dvr_state),
      masses(state.masses),
      momentum_space_grid(state.momentum_space_grid),
      momentum_space_range(state.momentum_space_range),
      grade(state.grade),
      expectation_table(state.expectation_table),
      expectations(state.expectations),
      positional_indices(state.positional_indices),
      momentum_indices(state.momentum_indices),
      scaling(state.scaling) {}

  // Number of positional dimensions (half the phase-space dimension).
  inline arma::uword dim() const {
    return this->dvr_state.dim();
  }

  // <x_i> in physical units: table entries rescaled by the positional scaling.
  // NOTE(review): `result * scale` multiplies two arma::vec objects, which in
  // Armadillo is a matrix product and only conforms when dim() == 1; an
  // element-wise product (`result % scale`) looks intended -- confirm.
  inline arma::vec positional_expectation() const {
    const arma::vec result = this->expectations(this->positional_indices);
    const arma::vec scale = this->scaling.rows(0, this->dim() - 1);
    return result * scale;
  }

  // <p_i> in physical units (same elementwise-vs-matrix concern as above).
  inline arma::vec momentum_expectation() const {
    const arma::vec result = this->expectations(this->momentum_indices);
    const arma::vec scale = this->scaling.rows(this->dim(), 2 * this->dim() - 1);
    return result * scale;
  }

  // Adds only the moment tables; the dvr_state of B is kept as-is.
  State operator+(const State & B) const {
    State state = B;
    state.expectations += this->expectations;
    return state;
  }

  // Scales only the moment table; the dvr_state is kept as-is.
  State operator*(const double B) const {
    State state = *this;
    state.expectations *= B;
    return state;
  }

  // Expectation of each polynomial, using the table where possible and a
  // freshly Wigner-transformed phase-space state for higher-order terms.
  arma::vec expectation(const std::vector<math::Polynomial<double>> & polynomials) const {
    const auto transformed = this->dvr_state.wigner_transform(
        this->momentum_space_grid, this->momentum_space_range
    );
    arma::vec result(polynomials.size());
    #pragma omp parallel for
    for (arma::uword i = 0; i < result.n_elem; i++) {
      result(i) = details::at_search(polynomials[i], transformed,
                                     this->expectations,
                                     this->expectation_table,
                                     this->scaling, this->grade);
    }
    return result;
  }

  // Single-polynomial expectation (same lookup-or-integrate strategy).
  template<typename T>
  auto expectation(const math::Polynomial <T> & polynomial) const {
    return details::at_search(polynomial,
                              this->dvr_state.wigner_transform(
                                  this->momentum_space_grid,
                                  this->momentum_space_range
                              ),
                              this->expectations,
                              this->expectation_table,
                              this->scaling, this->grade);
  }

  State & operator=(const State &) = default;
};

// Liouville-type operator: precomputes, for every moment in the table, the
// Moyal bracket of that moment with the (scaled) Hamiltonian, so that applying
// the operator to a State yields d<moment>/dt for every table entry.
struct Operator {
public:
  math::Polynomial<double> potential;
  math::Polynomial<double> H;                        // scaled Hamiltonian
  std::vector<math::Polynomial < double>> operators; // one bracket per table entry
  dvr::Operator dvr_operator;                        // DVR propagation of the wavefunction

  Operator(const State & state,
           const math::Polynomial<double> & potential) :
      potential(potential),
      H(hamiltonian(potential, state.masses).scale(state.scaling)),
      operators(),
      dvr_operator(state.dvr_state, potential) {

    std::vector<math::Polynomial<double>> op(std::pow(state.grade, state.dim() * 2));

    // entry 0 is the constant moment <1>, whose time derivative is zero
    op[0] = math::Polynomial<double>(state.dim() * 2);

    for (arma::uword i = 1; i < op.size(); i++) {
      const auto observable =
          math::Polynomial(math::polynomial::Term<double>(
              1.0, math::space::index_to_indices(i, state.expectation_table)));
      // truncate the Moyal series at half the smaller grade
      const arma::uword cut_off = std::min(observable.grade(), H.grade()) / 2;
      const auto moyal =
          moyal_bracket(math::Polynomial(observable), H, state.scaling, cut_off);
      op[i] = moyal;
    }

    this->operators = op;
  }

  inline PropagationType propagation_type() const {
    return Mixed;
  }

  // Apply the operator: returns a State whose expectations hold the time
  // derivative of each moment (the dvr_state is passed through unchanged and
  // is propagated separately by the wrapper below).
  State operator()(const State & state) const {
    arma::vec expectation_change_list = arma::vec(arma::size(state.expectations));
    const auto transformed = state.dvr_state.wigner_transform(
        state.momentum_space_grid, state.momentum_space_range
    );
    #pragma omp parallel for
    for (arma::uword i = 0; i < expectation_change_list.n_elem; i++) {
      expectation_change_list(i) =
          details::at_search(this->operators[i], transformed,
                             state.expectations, state.expectation_table,
                             state.scaling, state.grade);
    }
    return State(state.dvr_state, state.masses,
                 state.momentum_space_grid, state.momentum_space_range,
                 state.expectation_table, expectation_change_list,
                 state.positional_indices, state.momentum_indices,
                 state.scaling, state.grade);
  }
};

// RK4 propagator for the mixed scheme: the moment table is advanced with
// classic RK4 while the DVR wavefunction rides along via Schroedinger
// propagation at the half/full steps.
template<typename Operator, typename State, typename Potential>
OperatorWrapper <Operator, State, Potential> mixed_runge_kutta_4 =
    [](const Operator & liouville_operator,
       const Potential & potential) -> Propagator <State> {

  static_assert(has_propagation_type<Operator, PropagationType(void)>::value,
                "Propagation type not specified");

  // NOTE(review): the Error below is constructed and immediately discarded --
  // a `throw` appears to be missing, so the check has no effect.
  if (liouville_operator.propagation_type() != Mixed) {
    Error(
        "This wrapper is only valid for mixed type");
  }

  if constexpr(has_time_evolve<Potential, void(const double &)>::value) {
    // Time-dependent potential: evaluate operators at t, t+dt/2 and t+dt.
    return [&liouville_operator, &potential](const State & state,
                                             const double dt) -> State {
      Potential potential_at_half_dt = potential;
      potential_at_half_dt.time_evolve(0.5 * dt);

      Potential potential_at_dt = potential;
      potential_at_dt.time_evolve(dt);

      const Propagator<dvr::State> dvr_propagator =
          math::schrotinger_wrapper<dvr::Operator, dvr::State, Potential>(
              liouville_operator.dvr_operator, potential);

      const auto dvr_propagator_at_half_dt =
          math::schrotinger_wrapper<dvr::Operator, dvr::State, Potential>(
              liouville_operator.dvr_operator, potential_at_half_dt);

      // NOTE(review): dvr_propagator_at_dt is never used below -- either dead
      // code or a missing final propagation step.
      const auto dvr_propagator_at_dt =
          math::schrotinger_wrapper<dvr::Operator, dvr::State, Potential>(
              liouville_operator.dvr_operator, potential_at_dt);

      const Operator operator_at_half_dt = Operator(state, potential_at_half_dt);
      const Operator operator_at_dt = Operator(state, potential_at_dt);

      const State k1 = liouville_operator(state) * dt;

      State k1_with_dvr_at_half_dt = k1;
      k1_with_dvr_at_half_dt.dvr_state = dvr_propagator(k1.dvr_state, dt / 2.0);

      const State k2 = operator_at_half_dt(k1_with_dvr_at_half_dt * 0.5 + state) * dt;

      // NOTE(review): k3 is computed from k1 exactly like k2; classic RK4 (and
      // the time-independent branch below) uses k2 here -- suspected
      // copy/paste bug: should read operator_at_half_dt(k2_... * 0.5 + state).
      const State k3 = operator_at_half_dt(k1_with_dvr_at_half_dt * 0.5 + state) * dt;

      State k3_with_dvr_at_half_dt = k3;
      k3_with_dvr_at_half_dt.dvr_state =
          dvr_propagator_at_half_dt(k3.dvr_state, dt / 2.0);

      const State k4 = operator_at_dt(state + k3_with_dvr_at_half_dt) * dt;

      return state + k1 * (1.0 / 6.0) + k2 * (1.0 / 3.0)
                   + k3 * (1.0 / 3.0) + k4 * (1.0 / 6.0);
    };
  } else {
    // Time-independent potential: single operator, standard RK4 on the
    // moments; note k2/k4 here use the raw k1/k3 (without the half-step DVR
    // propagation), unlike the branch above -- intentional? worth confirming.
    return [&liouville_operator, &potential](const State & state,
                                             const double dt) -> State {
      const auto dvr_propagator =
          math::schrotinger_wrapper<dvr::Operator, dvr::State, Potential>(
              liouville_operator.dvr_operator, potential);

      const State k1 = liouville_operator(state) * dt;

      State k1_with_dvr_at_half_dt = k1;
      k1_with_dvr_at_half_dt.dvr_state = dvr_propagator(k1.dvr_state, dt / 2.0);

      const State k2 = liouville_operator(k1 * 0.5 + state) * dt;
      const State k3 = liouville_operator(k2 * 0.5 + state) * dt;

      State k3_with_dvr_at_half_dt = k3;
      k3_with_dvr_at_half_dt.dvr_state = dvr_propagator(k3.dvr_state, dt / 2.0);

      const State k4 = liouville_operator(state + k3) * dt;

      return state + k1 * (1.0 / 6.0) + k2 * (1.0 / 3.0)
                   + k3 * (1.0 / 3.0) + k4 * (1.0 / 6.0);
    };
  }
};

} // namespace dvr_smd
} // namespace method

#endif //METHODS_DVR_SMD_H
pumathreadpool.c
#define _GNU_SOURCE
#include <sched.h>

#include "pumathreadpool.h"
#include "internal/profiling.h"
#include "internal/pthreadbarrier.h"

#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>

#ifndef NOOPENMP
#include <omp.h>
#endif

/* Two builds exist throughout this file:
 *   - NOOPENMP: a hand-rolled pthreads pool (barriers + rwlock-protected work
 *     function pointer) with per-thread TLS keys;
 *   - default: OpenMP supplies the workers and most state is unused.
 * STATIC_THREADPOOL additionally switches the constructor between a global
 * singleton (setupThreadPool/getThreadPool) and createThreadPool(). */

#ifdef NOOPENMP
static pthread_once_t offsetKeyOnce = PTHREAD_ONCE_INIT;
static pthread_key_t timeOffsetKey; /* per-thread accumulated pool wait time */
#endif // NOOPENMP

#ifdef STATIC_THREADPOOL
static bool _setupComplete = false;
static struct pumaThreadPool* threadPool; /* the singleton pool */
#endif

/* Per-worker bootstrap record, owned by the pool. */
struct _threadPoolWorkerInfo
{
	struct pumaThreadPool* pool;
	size_t cpu;          /* cpu this worker pins itself to */
	size_t threadNum;    /* logical index within the pool */
	double timeSeconds;  /* duration of the worker's last work item */
};

struct pumaThreadPool
{
	size_t numThreads;
	pthread_t* threads;
	struct _threadPoolWorkerInfo** threadsInfo;

	pthread_barrier_t workWaitBarrier; /* released when new work is posted */
	pthread_barrier_t doneBarrier;     /* released when all workers finish */

	pthread_rwlock_t workFunctionLock; /* guards workFunction/arg handoff */
	void (*workFunction)(void* arg);
	void* arg;

	double timeSeconds; /* max worker time of the last dispatch */
};

#ifdef NOOPENMP
static pthread_key_t numThreadsKey;
static pthread_key_t threadNumKey;
static pthread_key_t cpuNumKey;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;
#endif // NOOPENMP

/* Logical index of the calling worker thread (0 for non-pool threads). */
size_t pumaGetThreadNum(void)
{
#ifdef NOOPENMP
	void* ptr = pthread_getspecific(threadNumKey);
	if(ptr != NULL)
		return *((size_t*)ptr);
	else
		return 0;
#else
	return omp_get_thread_num();
#endif // NOOPENMP
}

/* Thread count of `pool`, or of the calling thread's own pool when NULL.
 * NOTE(review): the NOOPENMP NULL-pool path dereferences the TLS value without
 * a NULL check, unlike pumaGetThreadNum above -- crashes when called from a
 * thread that never belonged to a pool. */
size_t pumaGetNumThreads(struct pumaThreadPool* pool)
{
	if(pool == NULL)
	{
#ifdef NOOPENMP
		return *((size_t*)pthread_getspecific(numThreadsKey));
#else
		return omp_get_max_threads();
#endif // NOOPENMP
	}
	else
		return pool->numThreads;
}

/* CPU the calling worker is pinned to (thread number under OpenMP, where
 * workers are pinned 1:1 to cpus of the same index). */
size_t pumaGetCPUNum(void)
{
#ifdef NOOPENMP
	void* ptr = pthread_getspecific(cpuNumKey);
	if(ptr != NULL)
		return *((size_t*)ptr);
	else
		return 0;
#else
	return omp_get_thread_num();
#endif // NOOPENMP
}

#ifdef NOOPENMP
#ifdef __linux__
/* Parse an affinity string of comma-separated entries, each "N" or "N-M",
 * into `set`.  Exits the process on malformed input.
 * NOTE(review): strtok_r mutates affinityStr in place -- callers must not
 * pass a string literal. */
static void _parseAffinityStr(char* affinityStr, cpu_set_t* set)
{
	char* saveptr;
	char* str = affinityStr;
	char* token;

	CPU_ZERO(set);

	while(token = strtok_r(str, ",", &saveptr)) /* assignment intended */
	{
		str = NULL; /* strtok_r contract: NULL on subsequent calls */
		unsigned int start;
		unsigned int end;
		int matched = sscanf(token, "%u-%u", &start, &end);
		if(matched == 1)
		{
			CPU_SET(start, set);
		}
		else if(matched == 2)
		{
			for(unsigned int i = start; i <= end; ++i)
				CPU_SET(i, set);
		}
		else
		{
			fprintf(stderr, "Error parsing affinity string!\n");
			exit(-1);
		}
	}
}
#endif

/* Branchless max of two doubles (relies on bool->0/1 arithmetic). */
static inline double _max(double a, double b)
{
	return (a > b) * a + (a <= b) * b;
}

/* One-time init of the per-thread wait-time accumulator for the caller. */
static void _makeTimeKey(void)
{
	pthread_key_create(&timeOffsetKey, NULL);
	pthread_setspecific(timeOffsetKey, malloc(sizeof(double)));
	*(double*)pthread_getspecific(timeOffsetKey) = 0;
}
#endif // NOOPENMP

/* Run workFunction(arg) once on every worker of `tp` and block until all have
 * finished.  Records the slowest worker's time into tp->timeSeconds and adds
 * it to the caller's cumulative wait time. */
void executeOnThreadPool(struct pumaThreadPool* tp,
		void (*workFunction)(void* arg), void* arg)
{
#ifdef NOOPENMP
	pthread_rwlock_wrlock(&tp->workFunctionLock);
	tp->workFunction = workFunction;
	tp->arg = arg;

	/* release the workers, then let them read workFunction under the rdlock */
	pthread_barrier_wait(&tp->workWaitBarrier);
	pthread_rwlock_unlock(&tp->workFunctionLock);

	pthread_barrier_wait(&tp->doneBarrier);

	tp->timeSeconds = 0;
	for(size_t i = 0; i < tp->numThreads; ++i)
		tp->timeSeconds = _max(tp->threadsInfo[i]->timeSeconds, tp->timeSeconds);

	(void)pthread_once(&offsetKeyOnce, &_makeTimeKey);
	*(double*)pthread_getspecific(timeOffsetKey) += tp->timeSeconds;
#else
	#pragma omp parallel num_threads(tp->numThreads)
	{
		workFunction(arg);
	}
#endif // NOOPENMP
}

#ifdef NOOPENMP
/* Worker main loop: pin to the assigned cpu, publish TLS identity, then wait
 * on the work barrier forever.  Terminated only by pthread_cancel (the barrier
 * wait is the cancellation point). */
static void* _threadPoolWorker(void* arg)
{
	struct _threadPoolWorkerInfo* info = (struct _threadPoolWorkerInfo*)arg;

	struct pumaThreadPool* pool = info->pool;

	/* Local stack copy because each pthreads thread has its own stack and we
	   don't do cross-thread stack writes, meaning that the stack's pages will
	   be numa domain local due to first-touch paging. */
	size_t numThreads = pool->numThreads;
	size_t threadNum = info->threadNum;
	size_t cpu = info->cpu;

	pthread_setspecific(numThreadsKey, &numThreads);
	pthread_setspecific(threadNumKey, &threadNum);
	pthread_setspecific(cpuNumKey, &cpu);

#ifdef __linux__
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(info->cpu, &set);
	sched_setaffinity(0, sizeof(set), &set);
#endif

	PROFILING_DECLS(doWork);

	while(true)
	{
		pthread_barrier_wait(&pool->workWaitBarrier);

		pthread_rwlock_rdlock(&pool->workFunctionLock);
		void (*workFunction)(void* arg) = pool->workFunction;
		void* arg = pool->arg;
		pthread_rwlock_unlock(&pool->workFunctionLock);

		PROFILE(doWork, workFunction(arg);)

		info->timeSeconds = GET_ELAPSED_S(doWork);

		pthread_barrier_wait(&pool->doneBarrier);
	}
}

/* One-time creation of the worker-identity TLS keys. */
static void _make_keys(void)
{
	pthread_key_create(&numThreadsKey, NULL);
	pthread_key_create(&threadNumKey, NULL);
	pthread_key_create(&cpuNumKey, NULL);
}
#endif // NOOPENMP

#ifdef STATIC_THREADPOOL
/* Singleton accessor; warns (but still returns, possibly NULL) if the pool
 * was never set up. */
struct pumaThreadPool* getThreadPool(void)
{
	if(!_setupComplete)
		fprintf(stderr, "Please call setupThreadPool() before getThreadPool().");
	return threadPool;
}

/* Build the pool with `numThreads` workers (0 = one per online processor),
 * optionally restricted to the cpus named by affinityStr ("0,2-5" syntax).
 * The two signatures below share one body via the preprocessor. */
void setupThreadPool(size_t numThreads, char* affinityStr)
{
	(void)affinityStr;
	if(_setupComplete)
		return;
#else
struct pumaThreadPool* createThreadPool(size_t numThreads, char* affinityStr)
{
	(void)affinityStr;
	struct pumaThreadPool* threadPool;
#endif
	/* NOTE(review): calloc/pthread_create results are unchecked throughout. */
	threadPool =
			(struct pumaThreadPool*)calloc(1, sizeof(struct pumaThreadPool));

	numThreads = (numThreads != 0) ? numThreads : sysconf(_SC_NPROCESSORS_ONLN);

	threadPool->numThreads = numThreads;
	threadPool->threads = (pthread_t*)calloc(numThreads, sizeof(pthread_t));

#ifdef NOOPENMP
	threadPool->threadsInfo =
			(struct _threadPoolWorkerInfo**)calloc(numThreads,
					sizeof(struct _threadPoolWorkerInfo*));

	/* numThreads + 1: the dispatching thread participates in both barriers */
	pthread_barrier_init(&threadPool->workWaitBarrier, NULL, numThreads + 1);
	pthread_barrier_init(&threadPool->doneBarrier, NULL, numThreads + 1);

	pthread_rwlock_init(&threadPool->workFunctionLock, NULL);

	(void)pthread_once(&key_once, &_make_keys);

	size_t currThread = 0;

#ifdef __linux__
	if(affinityStr != NULL)
	{
		cpu_set_t cpus;
		CPU_ZERO(&cpus);
		_parseAffinityStr(affinityStr, &cpus);

		size_t nCPUs = CPU_COUNT(&cpus);

		for(size_t i = 0; i < nCPUs && currThread < numThreads; ++i)
		{
			if(CPU_ISSET(i, &cpus))
			{
				struct _threadPoolWorkerInfo* tpInfo =
						(struct _threadPoolWorkerInfo*)malloc(
								sizeof(struct _threadPoolWorkerInfo));
				tpInfo->pool = threadPool;
				tpInfo->cpu = i;
				tpInfo->threadNum = currThread;

				threadPool->threadsInfo[currThread] = tpInfo;

				pthread_create(&threadPool->threads[currThread++], NULL,
						&_threadPoolWorker, tpInfo);
			}
		}
	}
	else
#endif // __linux__
	{
		/* no affinity restriction: workers pinned to cpus 0..numThreads-1 */
		for(size_t i = 0; currThread < numThreads; ++i)
		{
			struct _threadPoolWorkerInfo* tpInfo =
					(struct _threadPoolWorkerInfo*)malloc(
							sizeof(struct _threadPoolWorkerInfo));
			tpInfo->pool = threadPool;
			tpInfo->cpu = i;
			tpInfo->threadNum = currThread;

			threadPool->threadsInfo[currThread] = tpInfo;

			pthread_create(&threadPool->threads[currThread++], NULL,
					&_threadPoolWorker, tpInfo);
		}
	}
#else
	omp_set_num_threads(numThreads);
	#pragma omp parallel
	{
#ifdef __linux__
		cpu_set_t set;
		CPU_ZERO(&set);
		CPU_SET(omp_get_thread_num(), &set);
		sched_setaffinity(0, sizeof(set), &set);
#endif
	}
#endif // NOOPENMP

#ifdef STATIC_THREADPOOL
	_setupComplete = true;
#else
	return threadPool;
#endif
}

/* Total time the calling thread has spent waiting on pool dispatches. */
double pumaGetTimeWaitedForPool(void)
{
#ifdef NOOPENMP
	void* ptr = pthread_getspecific(timeOffsetKey);
	if(ptr != NULL)
		return *((double*)ptr);
#endif // NOOPENMP
	return 0;
}

/* Tear down the pool.
 * NOTE(review): threads are cancelled but never joined (their stacks/TLS may
 * still be live when the barriers are destroyed), pool->threadsInfo and each
 * per-worker info struct are leaked, and workFunctionLock is never destroyed. */
void freeThreadPool(struct pumaThreadPool* pool)
{
	for(size_t i = 0; i < pool->numThreads; ++i)
		(void)pthread_cancel(pool->threads[i]);

#ifdef NOOPENMP
	pthread_barrier_destroy(&pool->workWaitBarrier);
	pthread_barrier_destroy(&pool->doneBarrier);
#endif // NOOPENMP

	free(pool->threads);
	free(pool);
}
NAS_CG.c
//--------------------------------------------------------------------- // program CG //--------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> #if !defined(CLASS_W) && !defined(CLASS_S) && !defined(CLASS_A) && !defined(CLASS_B) && !defined(CLASS_C) # define CLASS_W #endif //---------- // Class S: //---------- #ifdef CLASS_S # define NA 1400 # define NONZER 7 # define NITER 15 # define SHIFT 10.0 # define RCOND 1.0e-1 #endif //---------- // Class W: //---------- #ifdef CLASS_W # define NA 7000 # define NONZER 8 # define NITER 15 # define SHIFT 12.0 # define RCOND 1.0e-1 #endif //---------- // Class A: //---------- #ifdef CLASS_A # define NA 14000 # define NONZER 11 # define NITER 15 # define SHIFT 20.0 # define RCOND 1.0e-1 #endif //---------- // Class B: //---------- #ifdef CLASS_B # define NA 75000 # define NONZER 13 # define NITER 75 # define SHIFT 60.0 # define RCOND 1.0e-1 #endif //---------- // Class C: //---------- #ifdef CLASS_C # define NA 150000 # define NONZER 15 # define NITER 75 # define SHIFT 110.0 # define RCOND 1.0e-1 #endif #define NZ (NA*(NONZER+1)*(NONZER+1)) #define NAZ (NA*(NONZER+1)) #define T_init 0 #define T_bench 1 #define T_conj_grad 2 #define T_last 3 typedef struct { double real; double imag; } dcomplex; #define min(x,y) ((x) < (y) ? (x) : (y)) #define max(x,y) ((x) > (y) ? 
(x) : (y)) //--------------------------------------------------------------------- /* common / main_int_mem / */ int colidx[NZ]; int rowstr[NA + 1]; int iv[NA]; int arow[NA]; int acol[NAZ]; /* common / main_flt_mem / */ double aelt[NAZ]; double a[NZ]; double x[NA + 2]; double z[NA + 2]; double p[NA + 2]; double q[NA + 2]; double r[NA + 2]; /* common / partit_size / */ int naa; int nzz; int firstrow; int lastrow; int firstcol; int lastcol; /* common /urando/ */ double amult; double tran; //--------------------------------------------------------------------- void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm); void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][NONZER + 1], double aelt[][NONZER + 1], int iv[]); void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][NONZER + 1], double aelt[][NONZER + 1], int firstrow, int lastrow, int nzloc[], double rcond, double shift); void sprnvc(int n, int nz, int nn1, double v[], int iv[]); int icnvrt(double x, int ipwr2); void vecset(int n, double v[], int iv[], int *nzv, int i, double val); void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified); double randlc( double *x, double a ); void vranlc( int n, double *x, double a, double y[] ); double start[64], elapsed[64]; double elapsed_time( void ); void timer_clear( int n ); void timer_start( int n ); void timer_stop( int n ); double timer_read( int n ); void wtime(double *t); //--------------------------------------------------------------------- int main(int argc, char *argv[]) { int i, j, k, it; double zeta; double rnorm; double norm_temp1, norm_temp2; double t, mflops, tmax; char Class; int verified; double zeta_verify_value, epsilon, err; char *t_names[T_last]; for (i = 0; i < T_last; i++) 
{ timer_clear(i); } timer_start(T_init); firstrow = 0; lastrow = NA - 1; firstcol = 0; lastcol = NA - 1; if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10) { Class = 'S'; zeta_verify_value = 8.5971775078648; } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12) { Class = 'W'; zeta_verify_value = 10.362595087124; } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20) { Class = 'A'; zeta_verify_value = 17.130235054029; } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60) { Class = 'B'; zeta_verify_value = 22.712745482631; } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110) { Class = 'C'; zeta_verify_value = 28.973605592845; } else if (NA == 1500000 && NONZER == 21 && NITER == 100 && SHIFT == 500) { Class = 'D'; zeta_verify_value = 52.514532105794; } else if (NA == 9000000 && NONZER == 26 && NITER == 100 && SHIFT == 1500) { Class = 'E'; zeta_verify_value = 77.522164599383; } else { Class = 'U'; } printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - CG Benchmark\n\n"); printf(" Size: %11d\n", NA); printf(" Iterations: %5d\n", NITER); printf("\n"); naa = NA; nzz = NZ; //--------------------------------------------------------------------- // Inialize random number generator //--------------------------------------------------------------------- tran = 314159265.0; amult = 1220703125.0; zeta = randlc(&tran, amult); //--------------------------------------------------------------------- // //--------------------------------------------------------------------- makea(naa, nzz, a, colidx, rowstr, firstrow, lastrow, firstcol, lastcol, arow, (int (*)[NONZER + 1])(void*)acol, (double (*)[NONZER + 1])(void*)aelt, iv); //--------------------------------------------------------------------- // Note: as a result of the above call to makea: // values of j used in indexing rowstr go from 0 --> lastrow-firstrow // values of colidx which are col indexes go from firstcol --> lastcol // So: // Shift the col 
index vals from actual (firstcol --> lastcol ) // to local, i.e., (0 --> lastcol-firstcol) //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j, k) firstprivate(lastrow, firstrow, firstcol, rowstr) reduction(- : colidx[:567000]) for (j = 0; j < lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j + 1]; k++) { colidx[k] = colidx[k] - firstcol; } } //--------------------------------------------------------------------- // set starting vector to (1, 1, .... 1) //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(i) for (i = 0; i < NA + 1; i++) { x[i] = 1.0; } #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol) for (j = 0; j < lastcol - firstcol + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } zeta = 0.0; //--------------------------------------------------------------------- //----> // Do one iteration untimed to init all code and data page tables //----> (then reinit, start timing, to niter its) //--------------------------------------------------------------------- for (it = 1; it <= 1; it++) { //--------------------------------------------------------------------- // The call to the conjugate gradient routine: //--------------------------------------------------------------------- conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm); //--------------------------------------------------------------------- // zeta = shift + 1/(x.z) // So, first: (x.z) // Also, find norm of z // So, first: (z.z) //--------------------------------------------------------------------- norm_temp1 = 0.0; norm_temp2 = 0.0; #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, x, z) reduction(+ : norm_temp1) reduction(+ : norm_temp2) for (j = 0; j < lastcol - firstcol + 1; j++) { norm_temp1 = norm_temp1 + x[j] * z[j]; norm_temp2 = norm_temp2 + z[j] * z[j]; } norm_temp2 = 
1.0 / sqrt(norm_temp2); //--------------------------------------------------------------------- // Normalize z to obtain x //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, norm_temp2, z) for (j = 0; j < lastcol - firstcol + 1; j++) { x[j] = norm_temp2 * z[j]; } } // end of do one iteration untimed //--------------------------------------------------------------------- // set starting vector to (1, 1, .... 1) //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(i) for (i = 0; i < NA + 1; i++) { x[i] = 1.0; } zeta = 0.0; timer_stop(T_init); printf(" Initialization time = %15.3f seconds\n", timer_read(T_init)); timer_start(T_bench); //--------------------------------------------------------------------- //----> // Main Iteration for inverse power method //----> //--------------------------------------------------------------------- for (it = 1; it <= NITER; it++) { //--------------------------------------------------------------------- // The call to the conjugate gradient routine: //--------------------------------------------------------------------- conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm); //--------------------------------------------------------------------- // zeta = shift + 1/(x.z) // So, first: (x.z) // Also, find norm of z // So, first: (z.z) //--------------------------------------------------------------------- norm_temp1 = 0.0; norm_temp2 = 0.0; #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, x, z) reduction(+ : norm_temp1) reduction(+ : norm_temp2) for (j = 0; j < lastcol - firstcol + 1; j++) { norm_temp1 = norm_temp1 + x[j] * z[j]; norm_temp2 = norm_temp2 + z[j] * z[j]; } norm_temp2 = 1.0 / sqrt(norm_temp2); zeta = SHIFT + 1.0 / norm_temp1; if (it == 1) printf("\n iteration ||r|| zeta\n"); printf(" %5d %20.14E%20.13f\n", it, rnorm, 
zeta); //--------------------------------------------------------------------- // Normalize z to obtain x //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, norm_temp2, z) for (j = 0; j < lastcol - firstcol + 1; j++) { x[j] = norm_temp2 * z[j]; } } // end of main iter inv pow meth timer_stop(T_bench); //--------------------------------------------------------------------- // End of timed section //--------------------------------------------------------------------- t = timer_read(T_bench); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (Class != 'U') { err = fabs(zeta - zeta_verify_value) / zeta_verify_value; if (err <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.13E\n", zeta); printf(" Error is %20.13E\n", err); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.13E\n", zeta); printf(" The correct zeta is %20.13E\n", zeta_verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if (t != 0.0) { mflops = (double)(2 * NITER * NA) * (3.0 + (double)(NONZER * (NONZER + 1)) + 25.0 * (5.0 + (double)(NONZER * (NONZER + 1))) + 3.0) / t / 1000000.0; } else { mflops = 0.0; } print_results("CG", Class, NA, 0, 0, NITER, t, mflops, " floating point", verified); int exitValue = verified ? 
	  0 : 1;
  return exitValue;
}

//---------------------------------------------------------------------
// Floating point arrays here are named as in NPB1 spec discussion of
// CG algorithm
//---------------------------------------------------------------------
// Runs cgitmax iterations of unpreconditioned conjugate gradient on the
// sparse system (a, colidx, rowstr) with right-hand side x, producing the
// approximate solution z and returning ||x - A.z|| through *rnorm.
// naa, firstrow/lastrow and firstcol/lastcol are file-scope globals set up
// before this is called (declared outside this excerpt).
void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm)
{
  int j, k;
  int cgit, cgitmax = 25;   // fixed per-call CG iteration count (NPB spec)
  double d, sum, rho, rho0, alpha, beta;

  rho = 0.0;

  //---------------------------------------------------------------------
  // Initialize the CG algorithm:
  //   q = 0, z = 0, r = x, p = r
  //---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(naa, x)
  for (j = 0; j < naa + 1; j++) {
    q[j] = 0.0;
    z[j] = 0.0;
    r[j] = x[j];
    p[j] = r[j];
  }

  //---------------------------------------------------------------------
  // rho = r.r
  // Now, obtain the norm of r: First, sum squares of r elements locally...
  //---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, r) reduction(+ : rho)
  for (j = 0; j < lastcol - firstcol + 1; j++) {
    rho = rho + r[j] * r[j];
  }

  //---------------------------------------------------------------------
  //---->
  // The conj grad iteration loop
  //---->
  //---------------------------------------------------------------------
  for (cgit = 1; cgit <= cgitmax; cgit++) {
    //---------------------------------------------------------------------
    // q = A.p
    // The partition submatrix-vector multiply: use workspace w
    //---------------------------------------------------------------------
    //
    // NOTE: this version of the multiply is actually (slightly: maybe %5)
    //       faster on the sp2 on 16 nodes than is the unrolled-by-2 version
    //       below. On the Cray t3d, the reverse is true, i.e., the
    //       unrolled-by-two version is some 10% faster.
    // The unrolled-by-8 version below is significantly faster
    // on the Cray t3d - overall speed of code is 1.5 times faster.
#pragma omp parallel for default(shared) private(j, k, sum) firstprivate(lastrow, firstrow, rowstr, a, colidx, p)
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      sum = 0.0;   // row-j dot product: a(j,:) . p  (CSR traversal)
      for (k = rowstr[j]; k < rowstr[j + 1]; k++) {
        sum = sum + a[k] * p[colidx[k]];
      }
      q[j] = sum;
    }

    // Retained alternative multiply kernels (disabled): unrolled-by-2
    // and unrolled-by-8 variants referenced in the NOTE above.
    /*
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      int i = rowstr[j];
      int iresidue = (rowstr[j+1] - i) % 2;
      double sum1 = 0.0;
      double sum2 = 0.0;
      if (iresidue == 1)
        sum1 = sum1 + a[i]*p[colidx[i]];
      for (k = i + iresidue; k <= rowstr[j+1] - 2; k += 2) {
        sum1 = sum1 + a[k] *p[colidx[k]];
        sum2 = sum2 + a[k+1]*p[colidx[k+1]];
      }
      q[j] = sum1 + sum2;
    }
    */

    /*
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      int i = rowstr[j];
      int iresidue = (rowstr[j+1] - i) % 8;
      double sum = 0.0;
      for (k = i; k <= i + iresidue - 1; k++) {
        sum = sum + a[k]*p[colidx[k]];
      }
      for (k = i + iresidue; k <= rowstr[j+1] - 8; k += 8) {
        sum = sum + a[k  ]*p[colidx[k  ]]
                  + a[k+1]*p[colidx[k+1]]
                  + a[k+2]*p[colidx[k+2]]
                  + a[k+3]*p[colidx[k+3]]
                  + a[k+4]*p[colidx[k+4]]
                  + a[k+5]*p[colidx[k+5]]
                  + a[k+6]*p[colidx[k+6]]
                  + a[k+7]*p[colidx[k+7]];
      }
      q[j] = sum;
    }
    */

    //---------------------------------------------------------------------
    // Obtain p.q
    //---------------------------------------------------------------------
    d = 0.0;
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, p, q) reduction(+ : d)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      d = d + p[j] * q[j];
    }

    //---------------------------------------------------------------------
    // Obtain alpha = rho / (p.q)
    //---------------------------------------------------------------------
    alpha = rho / d;

    //---------------------------------------------------------------------
    // Save a temporary of rho
    //---------------------------------------------------------------------
    rho0 = rho;
//--------------------------------------------------------------------- // Obtain z = z + alpha*p // and r = r - alpha*q //--------------------------------------------------------------------- rho = 0.0; #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, alpha, p, q) for (j = 0; j < lastcol - firstcol + 1; j++) { z[j] = z[j] + alpha * p[j]; r[j] = r[j] - alpha * q[j]; } //--------------------------------------------------------------------- // rho = r.r // Now, obtain the norm of r: First, sum squares of r elements locally... //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, r) reduction(+ : rho) for (j = 0; j < lastcol - firstcol + 1; j++) { rho = rho + r[j] * r[j]; } //--------------------------------------------------------------------- // Obtain beta: //--------------------------------------------------------------------- beta = rho / rho0; //--------------------------------------------------------------------- // p = r + beta*p //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, beta, r) for (j = 0; j < lastcol - firstcol + 1; j++) { p[j] = r[j] + beta * p[j]; } } // end of do cgit=1,cgitmax //--------------------------------------------------------------------- // Compute residual norm explicitly: ||r|| = ||x - A.z|| // First, form A.z // The partition submatrix-vector multiply //--------------------------------------------------------------------- sum = 0.0; #pragma omp parallel for default(shared) private(j, k, d) firstprivate(lastrow, firstrow, rowstr, a, colidx, z) for (j = 0; j < lastrow - firstrow + 1; j++) { d = 0.0; for (k = rowstr[j]; k < rowstr[j + 1]; k++) { d = d + a[k] * z[colidx[k]]; } r[j] = d; } //--------------------------------------------------------------------- // At this point, r contains A.z 
//--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j, d) firstprivate(lastcol, firstcol, x, r) reduction(+ : sum) for (j = 0; j < lastcol - firstcol + 1; j++) { d = x[j] - r[j]; sum = sum + d * d; } *rnorm = sqrt(sum); } //--------------------------------------------------------------------- // generate the test problem for benchmark 6 // makea generates a sparse matrix with a // prescribed sparsity distribution // // parameter type usage // // input // // n i number of cols/rows of matrix // nz i nonzeros as declared array size // rcond r*8 condition number // shift r*8 main diagonal shift // // output // // a r*8 array for nonzeros // colidx i col indices // rowstr i row pointers // // workspace // // iv, arow, acol i // aelt r*8 //--------------------------------------------------------------------- void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][NONZER + 1], double aelt[][NONZER + 1], int iv[]) { int iouter, ivelt, nzv, nn1; int ivc[NONZER + 1]; double vc[NONZER + 1]; //--------------------------------------------------------------------- // nonzer is approximately (int(sqrt(nnza /n))); //--------------------------------------------------------------------- //--------------------------------------------------------------------- // nn1 is the smallest power of two not less than n //--------------------------------------------------------------------- nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); //--------------------------------------------------------------------- // Generate nonzero positions and save for the use in sparse. 
//--------------------------------------------------------------------- for (iouter = 0; iouter < n; iouter++) { nzv = NONZER; sprnvc(n, nzv, nn1, vc, ivc); vecset(n, vc, ivc, &nzv, iouter + 1, 0.5); arow[iouter] = nzv; #pragma omp parallel for default(shared) private(ivelt) firstprivate(nzv, iouter, ivc, vc) for (ivelt = 0; ivelt < nzv; ivelt++) { acol[iouter][ivelt] = ivc[ivelt] - 1; aelt[iouter][ivelt] = vc[ivelt]; } } //--------------------------------------------------------------------- // ... make the sparse matrix from list of elements with duplicates // (iv is used as workspace) //--------------------------------------------------------------------- sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol, aelt, firstrow, lastrow, iv, RCOND, SHIFT); } //--------------------------------------------------------------------- // rows range from firstrow to lastrow // the rowstr pointers are defined for nrows = lastrow-firstrow+1 values //--------------------------------------------------------------------- void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][NONZER + 1], double aelt[][NONZER + 1], int firstrow, int lastrow, int nzloc[], double rcond, double shift) { int nrows; //--------------------------------------------------- // generate a sparse matrix from a list of // [col, row, element] tri //--------------------------------------------------- int i, j, j1, j2, nza, k, kk, nzrow, jcol; double size, scale, ratio, va; int cont40; //--------------------------------------------------------------------- // how many rows of result //--------------------------------------------------------------------- nrows = lastrow - firstrow + 1; //--------------------------------------------------------------------- // ...count the number of triples in each row //--------------------------------------------------------------------- #pragma omp parallel for default(shared) private(j) firstprivate(nrows) for (j = 0; j < nrows + 1; 
j++) {
    rowstr[j] = 0;
  }

  // rowstr[j+1] accumulates an over-estimate (arow[i] per triple) of the
  // space needed for row j, so each row has room for insertion below.
  for (i = 0; i < n; i++) {
    for (nza = 0; nza < arow[i]; nza++) {
      j = acol[i][nza] + 1;
      rowstr[j] = rowstr[j] + arow[i];
    }
  }

  // prefix-sum to turn per-row counts into CSR row start offsets
  rowstr[0] = 0;
  for (j = 1; j < nrows + 1; j++) {
    rowstr[j] = rowstr[j] + rowstr[j - 1];
  }
  nza = rowstr[nrows] - 1;

  //---------------------------------------------------------------------
  // ... rowstr(j) now is the location of the first nonzero
  //     of row j of a
  //---------------------------------------------------------------------
  if (nza > nz) {
    printf("Space for matrix elements exceeded in sparse\n");
    printf("nza, nzmax = %d, %d\n", nza, nz);
    exit(EXIT_FAILURE);
  }

  //---------------------------------------------------------------------
  // ... preload data pages
  //---------------------------------------------------------------------
  for (j = 0; j < nrows; j++) {
#pragma omp parallel for default(shared) private(k) firstprivate(j, rowstr)
    for (k = rowstr[j]; k < rowstr[j + 1]; k++) {
      a[k] = 0.0;
      colidx[k] = -1;   // -1 marks an unused slot within the row
    }
    nzloc[j] = 0;       // per-row count of duplicate (merged) entries
  }

  //---------------------------------------------------------------------
  // ... generate actual values by summing duplicates
  //---------------------------------------------------------------------
  size = 1.0;
  ratio = pow(rcond, (1.0 / (double)(n)));

  for (i = 0; i < n; i++) {
    for (nza = 0; nza < arow[i]; nza++) {
      j = acol[i][nza];

      scale = size * aelt[i][nza];
      for (nzrow = 0; nzrow < arow[i]; nzrow++) {
        jcol = acol[i][nzrow];
        va = aelt[i][nzrow] * scale;

        //--------------------------------------------------------------------
        // ... add the identity * rcond to the generated matrix to bound
        //     the smallest eigenvalue from below by rcond
        //--------------------------------------------------------------------
        if (jcol == j && j == i) {
          va = va + rcond - shift;
        }

        cont40 = 0;
        for (k = rowstr[j]; k < rowstr[j + 1]; k++) {
          if (colidx[k] > jcol) {
            //----------------------------------------------------------------
            // ... insert colidx here orderly (shift the tail of the row
            //     right by one to keep column indices sorted)
            //----------------------------------------------------------------
            for (kk = rowstr[j + 1] - 2; kk >= k; kk--) {
              if (colidx[kk] > -1) {
                a[kk + 1] = a[kk];
                colidx[kk + 1] = colidx[kk];
              }
            }
            colidx[k] = jcol;
            a[k] = 0.0;
            cont40 = 1;
            break;
          } else if (colidx[k] == -1) {
            colidx[k] = jcol;
            cont40 = 1;
            break;
          } else if (colidx[k] == jcol) {
            //--------------------------------------------------------------
            // ... mark the duplicated entry
            //--------------------------------------------------------------
            nzloc[j] = nzloc[j] + 1;
            cont40 = 1;
            break;
          }
        }
        if (cont40 == 0) {
          printf("internal error in sparse: i=%d\n", i);
          exit(EXIT_FAILURE);
        }
        a[k] = a[k] + va;   // accumulate into the slot found/created above
      }
    }
    size = size * ratio;
  }

  //---------------------------------------------------------------------
  // ... remove empty entries and generate final results
  //---------------------------------------------------------------------
  // prefix-sum duplicate counts, then compact each row left by the number
  // of duplicates seen in preceding rows
  for (j = 1; j < nrows; j++) {
    nzloc[j] = nzloc[j] + nzloc[j - 1];
  }

  for (j = 0; j < nrows; j++) {
    if (j > 0) {
      j1 = rowstr[j] - nzloc[j - 1];
    } else {
      j1 = 0;
    }
    j2 = rowstr[j + 1] - nzloc[j];
    nza = rowstr[j];
    for (k = j1; k < j2; k++) {
      a[k] = a[nza];
      colidx[k] = colidx[nza];
      nza = nza + 1;
    }
  }

#pragma omp parallel for default(shared) private(j) firstprivate(nrows, nzloc)
  for (j = 1; j < nrows + 1; j++) {
    rowstr[j] = rowstr[j] - nzloc[j - 1];
  }
  nza = rowstr[nrows] - 1;
}

//---------------------------------------------------------------------
// generate a sparse n-vector (v, iv)
// having nzv nonzeros
//
// mark(i) is set to 1 if position i is nonzero.
// mark is all zero on entry and is reset to all zero before exit
// this corrects a performance bug found by John G. Lewis, caused by
// reinitialization of mark on every one of the n calls to sprnvc
// NOTE(review): the "mark" description is inherited from the Fortran
// original; this C version detects repeats by scanning iv[] instead.
//---------------------------------------------------------------------
void sprnvc(int n, int nz, int nn1, double v[], int iv[])
{
  int nzv, ii, i;
  double vecelt, vecloc;

  nzv = 0;
  while (nzv < nz) {
    vecelt = randlc(&tran, amult);

    //---------------------------------------------------------------------
    // generate an integer between 1 and n in a portable manner
    //---------------------------------------------------------------------
    vecloc = randlc(&tran, amult);
    i = icnvrt(vecloc, nn1) + 1;
    if (i > n) continue;   // nn1 >= n, so some draws fall outside [1, n]

    //---------------------------------------------------------------------
    // was this integer generated already?
    //---------------------------------------------------------------------
    int was_gen = 0;
    for (ii = 0; ii < nzv; ii++) {
      if (iv[ii] == i) {
        was_gen = 1;
        break;
      }
    }
    if (was_gen) continue;
    v[nzv] = vecelt;
    iv[nzv] = i;           // positions stored 1-based
    nzv = nzv + 1;
  }
}

//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
int icnvrt(double x, int ipwr2)
{
  return (int)(ipwr2 * x);
}

//---------------------------------------------------------------------
// set ith element of sparse vector (v, iv) with
// nzv nonzeros to val
//---------------------------------------------------------------------
void vecset(int n, double v[], int iv[], int *nzv, int i, double val)
{
  int k;
  int set;

  set = 0;
  for (k = 0; k < *nzv; k++) {
    if (iv[k] == i) {
      v[k] = val;
      set = 1;
    }
  }
  // position i not present yet: append it and grow the vector
  if (set == 0) {
    v[*nzv] = val;
    iv[*nzv] = i;
    *nzv = *nzv + 1;
  }
}

double randlc( double *x, double a )
{
  //--------------------------------------------------------------------
  //
  // This routine returns a uniform pseudorandom double precision number in the
  // range (0, 1) by using the linear congruential generator
  //
  // x_{k+1} = a x_k (mod 2^46)
  //
  // where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
  // before repeating. The argument A is the same as 'a' in the above formula,
  // and X is the same as x_0. A and X must be odd double precision integers
  // in the range (1, 2^46). The returned value RANDLC is normalized to be
  // between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
  // the new seed x_1, so that subsequent calls to RANDLC using the same
  // arguments will generate a continuous sequence.
  //
  // This routine should produce the same results on any computer with at least
  // 48 mantissa bits in double precision floating point data. On 64 bit
  // systems, double precision should be disabled.
  //
  // David H. Bailey     October 26, 1990
  //
  //--------------------------------------------------------------------

  // r23 = pow(0.5, 23.0);
  ////  pow(0.5, 23.0) = 1.1920928955078125e-07
  // r46 = r23 * r23;
  // t23 = pow(2.0, 23.0);
  ////  pow(2.0, 23.0) = 8.388608e+06
  // t46 = t23 * t23;
  const double r23 = 1.1920928955078125e-07;
  const double r46 = r23 * r23;
  const double t23 = 8.388608e+06;
  const double t46 = t23 * t23;

  double t1, t2, t3, t4, a1, a2, x1, x2, z;
  double r;

  //--------------------------------------------------------------------
  // Break A into two parts such that A = 2^23 * A1 + A2.
  //--------------------------------------------------------------------
  t1 = r23 * a;
  a1 = (int) t1;
  a2 = a - t23 * a1;

  //--------------------------------------------------------------------
  // Break X into two parts such that X = 2^23 * X1 + X2, compute
  // Z = A1 * X2 + A2 * X1  (mod 2^23), and then
  // X = 2^23 * Z + A2 * X2  (mod 2^46).
//-------------------------------------------------------------------- t1 = r23 * (*x); x1 = (int) t1; x2 = *x - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int) (r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int) (r46 * t3); *x = t3 - t46 * t4; r = r46 * (*x); return r; } void vranlc( int n, double *x, double a, double y[] ) { //-------------------------------------------------------------------- // // This routine generates N uniform pseudorandom double precision numbers in // the range (0, 1) by using the linear congruential generator // // x_{k+1} = a x_k (mod 2^46) // // where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers // before repeating. The argument A is the same as 'a' in the above formula, // and X is the same as x_0. A and X must be odd double precision integers // in the range (1, 2^46). The N results are placed in Y and are normalized // to be between 0 and 1. X is updated to contain the new seed, so that // subsequent calls to VRANLC using the same arguments will generate a // continuous sequence. If N is zero, only initialization is performed, and // the variables X, A and Y are ignored. // // This routine is the standard version designed for scalar or RISC systems. // However, it should produce the same results on any single processor // computer with at least 48 mantissa bits in double precision floating point // data. On 64 bit systems, double precision should be disabled. 
// //-------------------------------------------------------------------- // r23 = pow(0.5, 23.0); //// pow(0.5, 23.0) = 1.1920928955078125e-07 // r46 = r23 * r23; // t23 = pow(2.0, 23.0); //// pow(2.0, 23.0) = 8.388608e+06 // t46 = t23 * t23; const double r23 = 1.1920928955078125e-07; const double r46 = r23 * r23; const double t23 = 8.388608e+06; const double t46 = t23 * t23; double t1, t2, t3, t4, a1, a2, x1, x2, z; int i; //-------------------------------------------------------------------- // Break A into two parts such that A = 2^23 * A1 + A2. //-------------------------------------------------------------------- t1 = r23 * a; a1 = (int) t1; a2 = a - t23 * a1; //-------------------------------------------------------------------- // Generate N results. This loop is not vectorizable. //-------------------------------------------------------------------- for ( i = 0; i < n; i++ ) { //-------------------------------------------------------------------- // Break X into two parts such that X = 2^23 * X1 + X2, compute // Z = A1 * X2 + A2 * X1 (mod 2^23), and then // X = 2^23 * Z + A2 * X2 (mod 2^46). 
//-------------------------------------------------------------------- t1 = r23 * (*x); x1 = (int) t1; x2 = *x - t23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int) (r23 * t1); z = t1 - t23 * t2; t3 = t23 * z + a2 * x2; t4 = (int) (r46 * t3) ; *x = t3 - t46 * t4; y[i] = r46 * (*x); } return; } void wtime(double *t) { static int sec = -1; struct timeval tv; gettimeofday(&tv, (void *)0); if (sec < 0) sec = tv.tv_sec; *t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec; } /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time( void ) { double t; wtime( &t ); return ( t ); } /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear( int n ) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void timer_start( int n ) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ /*****************************************************************/ void timer_stop( int n ) { double t, now; now = elapsed_time(); t = now - start[n]; elapsed[n] += t; } /*****************************************************************/ /****** T I M E R _ R E A D ******/ /*****************************************************************/ double timer_read( int n ) { return ( elapsed[n] ); } void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) { char size[16]; int j; printf( "\n\n %s Benchmark Completed.\n", name ); printf( " Class = %12c\n", class ); // If this is not a grid-based problem (EP, FT, CG), then // we only print n1, which contains some 
measure of the // problem size. In that case, n2 and n3 are both zero. // Otherwise, we print the grid size n1xn2xn3 if ( ( n2 == 0 ) && ( n3 == 0 ) ) { if ( ( name[0] == 'E' ) && ( name[1] == 'P' ) ) { sprintf( size, "%15.0lf", pow(2.0, n1) ); j = 14; if ( size[j] == '.' ) { size[j] = ' '; j--; } size[j + 1] = '\0'; printf( " Size = %15s\n", size ); } else { printf( " Size = %12d\n", n1 ); } } else { printf( " Size = %4dx%4dx%4d\n", n1, n2, n3 ); } printf( " Iterations = %12d\n", niter ); printf( " Time in seconds = %12.2lf\n", t ); printf( " Mop/s total = %15.2lf\n", mops ); printf( " Operation type = %24s\n", optype ); if ( verified ) printf( " Verification = %12s\n", "SUCCESSFUL" ); else printf( " Verification = %12s\n", "UNSUCCESSFUL" ); }
crypt-sha1_fmt_plug.c
/*
 * This file is based on the "cryptsha512_fmt_plug.c" file.
 *
 * This software is Copyright (c) 2014 Dhiru Kholia, and it is hereby released
 * to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Enhanced code (dropped usage of the Gladman hmac code), and addition of SSE2
 * logic, Aug 2014, JimF.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_cryptsha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cryptsha1);
#else

#include <string.h>

#ifdef _OPENMP
#define OMP_SCALE 16 // untested
#include <omp.h>
#endif

#include "arch.h"
#include "sha.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#define PBKDF1_LOGIC 1
#include "pbkdf2_hmac_sha1.h"
#include "base64_convert.h"
#include "memdbg.h"

#define SHA1_MAGIC "$sha1$"
#define SHA1_SIZE 20

#define FORMAT_LABEL            "sha1crypt"
#define FORMAT_NAME             "NetBSD's sha1crypt"

#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1001

#define BINARY_SIZE             20
// max valid salt len in hash. Final salt 'used' is larger, by length of "$sha1$" and length of base10 string of rounds
#define SALT_LENGTH             64

#ifdef MMX_COEF
#define ALGORITHM_NAME          "PBKDF1-SHA1 " SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME          "PBKDF1-SHA1 " ARCH_BITS_STR "/" ARCH_BITS_STR
#endif

#define PLAINTEXT_LENGTH        125
#define CHECKSUM_LENGTH         28

#define BINARY_ALIGN            4
#define SALT_SIZE               sizeof(struct saltstruct)
#define SALT_ALIGN              4

#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT      MMX_COEF
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* An example hash (of password) is $sha1$40000$jtNX3nZ2$hBNaIXkt4wBI2o5rsi8KejSjNqIq.
 * An sha1-crypt hash string has the format $sha1$rounds$salt$checksum, where:
 *
 * $sha1$ is the prefix used to identify sha1-crypt hashes, following the Modular Crypt Format
 * rounds is the decimal number of rounds to use (40000 in the example).
 * salt is 0-64 characters drawn from [./0-9A-Za-z] (jtNX3nZ2 in the example).
 * checksum is 28 characters drawn from the same set, encoding a 168-bit checksum.
 */

static struct fmt_tests tests[] = {
	{"$sha1$64000$wnUR8T1U$vt1TFQ50tBMFgkflAFAOer2CwdYZ", "password"},
	{"$sha1$40000$jtNX3nZ2$hBNaIXkt4wBI2o5rsi8KejSjNqIq", "password"},
	{"$sha1$64000$wnUR8T1U$wmwnhQ4lpo/5isi5iewkrHN7DjrT", "123456"},
	{"$sha1$64000$wnUR8T1U$azjCegpOIk0FjE61qzGWhdkpuMRL", "complexlongpassword@123456"},
	{NULL}
};

// Per-candidate plaintext and computed PBKDF1 output, sized in init().
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static struct saltstruct {
	unsigned int length;   // bytes of `salt` actually used
	unsigned int rounds;   // PBKDF1 iteration count parsed from the hash
	unsigned char salt[SALT_LENGTH+sizeof(SHA1_MAGIC)+7+1]; // allows up to 9999999 sized rounds with 64 byte salt.
} *cur_salt;

// Allocate per-candidate buffers; under OpenMP the key-count limits are
// scaled by thread count times OMP_SCALE.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

// Progressively wider low-bit slices of the first output word, used by
// the cracker's hash tables.
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

// Syntactic validation of a candidate ciphertext string:
// "$sha1$<rounds>$<salt>$<checksum>" with canonical decimal rounds.
static int valid(char * ciphertext, struct fmt_main * self)
{
	char *p, *keeptr, tst[24];
	unsigned rounds;
	if (strncmp(ciphertext, SHA1_MAGIC, sizeof(SHA1_MAGIC) - 1))
		return 0;
	// validate rounds (work on a copy: strtok modifies its argument)
	keeptr = strdup(ciphertext);
	p = &keeptr[sizeof(SHA1_MAGIC)-1];
	if ((p =
strtok(p, "$")) == NULL)	/* rounds */
		goto err;
	rounds = strtoul(p, NULL, 10);
	// round-trip check: rejects leading zeros, signs and overflow
	sprintf(tst, "%u", rounds);
	if (strcmp(tst, p))
		goto err;
	// validate salt
	if ((p = strtok(NULL, "$")) == NULL)	/* salt */
		goto err;
	if (strlen(p) > SALT_LENGTH || strlen(p) != base64_valid_length(p, e_b64_crypt, 0))
		goto err;
	// validate checksum
	if ((p = strtok(NULL, "$")) == NULL)	/* checksum */
		goto err;
	if (strlen(p) > CHECKSUM_LENGTH || strlen(p) != base64_valid_length(p, e_b64_crypt, 0))
		goto err;
	if (strtok(NULL, "$"))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:;
	MEM_FREE(keeptr);
	return 0;
}

// Decode 4 crypt-base64 chars into 3 output bytes at positions b1/b2/b3.
// NOTE: expands `pos`, `out` and `value` from the caller's scope and
// evaluates its arguments once each — keep arguments side-effect free.
#define TO_BINARY(b1, b2, b3) \
	value = (ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \
		((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \
	pos += 4; \
	out[b1] = value >> 16; \
	out[b2] = value >> 8; \
	out[b3] = value;

// Decode the trailing 28-char checksum field into the 20-byte binary hash.
// Returns a pointer to a static buffer (the usual JtR get_binary contract).
static void * get_binary(char * ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 16];
		ARCH_WORD dummy;
		ARCH_WORD_32 swap[1];
	} buf;
	unsigned char *out = buf.c;
	ARCH_WORD_32 value;
	char *pos = strrchr(ciphertext, '$') + 1;
	int i = 0;

	do {
		TO_BINARY(i, i + 1, i + 2);
		i = i + 3;
	} while (i <= 18);   // 7 groups of 3 bytes = 21 >= BINARY_SIZE
#if (ARCH_LITTLE_ENDIAN==0)
	for (i = 0; i < sizeof(buf.c)/4; ++i) {
		buf.swap[i] = JOHNSWAP(buf.swap[i]);
	}
#endif
	return (void *)out;
}

// Store a candidate plaintext, truncated to PLAINTEXT_LENGTH.
static void set_key(char *key, int index)
{
	int len = strlen(key);
	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

// Hash all pending candidates against cur_salt; parallelized over groups
// of MAX_KEYS_PER_CRYPT keys when OpenMP/SIMD is available.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SSE_GROUP_SZ_SHA1
		int lens[SSE_GROUP_SZ_SHA1], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
			unsigned
char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = crypt_out[index+i]; } pbkdf1_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->length, cur_salt->rounds, &(x.poutc), BINARY_SIZE, 0); #else pbkdf1_sha1((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->length, cur_salt->rounds, (unsigned char*)crypt_out[index], BINARY_SIZE, 0); #endif } return count; } static void set_salt(void *salt) { cur_salt = salt; } static void *get_salt(char *ciphertext) { static struct saltstruct out; char tmp[sizeof(out.salt)]; char *p; memset(&out, 0, sizeof(out)); p = strrchr(ciphertext, '$') + 1; strnzcpy(tmp, ciphertext, p - ciphertext); out.rounds = strtoul(&ciphertext[sizeof(SHA1_MAGIC)-1], NULL, 10); // point p to the salt value, BUT we have to decorate the salt for this hash. p = strrchr(tmp, '$') + 1; // real salt used is: <salt><magic><iterations> out.length = snprintf((char*)out.salt, sizeof(out.salt), "%.*s%s%u", (int)strlen(p), p, SHA1_MAGIC, out.rounds); return &out; } static int cmp_all(void *binary, int count) { int index = 0; #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } // Public domain hash function by DJ Bernstein // We are hashing the entire struct static int salt_hash(void *salt) { unsigned char *s = salt; unsigned int hash = 5381; unsigned int i; for (i = 0; i < SALT_SIZE; i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct saltstruct *p = (struct saltstruct *)salt; return p->rounds; } #endif struct fmt_main fmt_cryptsha1 = 
{
	{ // format parameters
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{
			"iteration count",
		},
#endif
		tests
	}, { // format methods (JtR driver entry points)
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{
			iteration_count,
		},
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
triMeshAcceleratorBVH.h
#pragma once
#ifndef _TRIMESH_ACCELERATOR_BVH_H_
#define _TRIMESH_ACCELERATOR_BVH_H_

namespace ml {

	//! One node of a binary bounding-volume hierarchy (BVH) over triangles.
	//! An interior node has both children set and leafTri == nullptr; a leaf
	//! has no children and references exactly one triangle (not owned).
	//! Child nodes ARE owned and are released recursively by the destructor.
	template <class FloatType>
	struct TriangleBVHNode {
		TriangleBVHNode() : rChild(0), lChild(0), leafTri(0) {}
		~TriangleBVHNode() {
			// SAFE_DELETE is a project macro defined elsewhere (presumably
			// delete + null — TODO confirm); recursing through the children
			// frees the entire subtree.
			SAFE_DELETE(rChild);
			SAFE_DELETE(lChild);
		}

		//wait for vs 2013
		//template<class T>
		//using Triangle = TriMesh::Triangle<T>;

		BoundingBox3<FloatType> boundingBox;                  // AABB of this subtree; valid only after computeBoundingBox()
		const typename TriMesh<FloatType>::Triangle* leafTri; // set only on leaves; not owned by the node
		TriangleBVHNode<FloatType> *lChild;
		TriangleBVHNode<FloatType> *rChild;

		//! Recomputes boundingBox bottom-up for the whole subtree: a leaf
		//! takes its triangle's bounds, an interior node the union of its
		//! children's boxes.
		void computeBoundingBox() {
			boundingBox.reset();
			if (!lChild && !rChild) {
				leafTri->includeInBoundingBox(boundingBox);
			} else {
				if (lChild) {
					lChild->computeBoundingBox();
					boundingBox.include(lChild->boundingBox);
				}
				if (rChild) {
					rChild->computeBoundingBox();
					boundingBox.include(rChild->boundingBox);
				}
			}
		}

		//! Recursively partitions [begin, end) at the spatial midpoint of the
		//! longest axis of the triangle-center bounds: the range is
		//! stable-sorted along that axis, then midIter advances to the first
		//! center at/past the midpoint. Note midIter starts at begin + 1 and
		//! the scan stops at end - 1, so each side receives at least one
		//! triangle. A range of size 1 becomes a leaf.
		void splitMidPoint(typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& begin, typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& end) {
			if (end - begin > 1) {
				//determine longest axis
				BoundingBox3<FloatType> bbox;
				for (auto iter = begin; iter != end; iter++) {
					bbox.include((*iter)->getCenter());
				}
				FloatType maxExtent = bbox.getMaxExtent();
				typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator midIter = begin + 1;
				if (bbox.getExtentX() > bbox.getExtentY() && bbox.getExtentX() > bbox.getExtentZ()) {
					//x
					std::stable_sort(begin, end, cmpX);
					FloatType middle = bbox.getMinX() + maxExtent / 2;
					for (; midIter != end - 1; midIter++) {
						if ((*midIter)->getCenter().x >= middle) break;
					}
				} else if (bbox.getExtentY() > bbox.getExtentX() && bbox.getExtentY() > bbox.getExtentZ()) {
					//y
					std::stable_sort(begin, end, cmpY);
					FloatType middle = bbox.getMinY() + maxExtent / 2;
					for (; midIter != end - 1; midIter++) {
						if ((*midIter)->getCenter().y >= middle) break;
					}
				} else {
					//z
					std::stable_sort(begin, end, cmpZ);
					FloatType middle = bbox.getMinZ() + maxExtent / 2;
					for (; midIter != end - 1; midIter++) {
						if ((*midIter)->getCenter().z >= middle) break;
					}
				}
				lChild = new TriangleBVHNode;
				rChild = new TriangleBVHNode;
				lChild->splitMidPoint(begin, midIter);
				rChild->splitMidPoint(midIter, end);
			} else {
				assert(end - begin == 1);
				leafTri = *begin; //found a leaf
			}
		}

		//! Recursively partitions [begin, end) at the median element, cycling
		//! the sort axis x -> y -> z per tree level; produces a balanced
		//! tree. A range of size 1 becomes a leaf.
		void splitMedian(typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& begin, typename std::vector<typename TriMesh<FloatType>::Triangle*>::iterator& end, unsigned int lastSortAxis) {
			if (end - begin > 1) {
				if (lastSortAxis == 0) std::stable_sort(begin, end, cmpX);
				else if (lastSortAxis == 1) std::stable_sort(begin, end, cmpY);
				else std::stable_sort(begin, end, cmpZ);
				lChild = new TriangleBVHNode;
				rChild = new TriangleBVHNode;
				const unsigned int newSortAxis = (lastSortAxis + 1) % 3;
				lChild->splitMedian(begin, begin + ((end - begin) / 2), newSortAxis);
				rChild->splitMedian(begin + ((end - begin) / 2), end, newSortAxis);
			} else {
				assert(end - begin == 1);
				leafTri = *begin; //found a leaf
			}
		}

		//! A node with no children is a leaf (and must carry leafTri).
		inline bool isLeaf() const { return !(lChild || rChild); }

		//! Returns the closest triangle of this subtree hit by ray r, or
		//! nullptr. t/u/v receive the hit parameters; tmax is shrunk to each
		//! accepted hit distance so farther subtrees are pruned. t must be
		//! initialized by the caller (see the early out below).
		const typename TriMesh<FloatType>::Triangle* intersect(const Ray<FloatType> &r, FloatType& t, FloatType& u, FloatType& v, FloatType& tmin, FloatType& tmax, bool onlyFrontFaces = false) const {
			if (t < tmin || t > tmax) return nullptr; //early out (warning t must be initialized)
			if (boundingBox.intersect(r, tmin, tmax)) {
				if (isLeaf()) {
					if (leafTri->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces)) {
						tmax = t; // shrink the search interval to the new closest hit
						return leafTri;
					}
				} else {
					const typename TriMesh<FloatType>::Triangle* t0 = lChild->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces);
					const typename TriMesh<FloatType>::Triangle* t1 = rChild->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces);
					// rChild is traversed second, against the tmax already
					// shrunk by lChild's hit (if any), so a non-null t1 is
					// at least as close as t0 and is preferred
					if (t1) return t1;
					if (t0) return t0;
				}
			}
			return nullptr;
		}

		// collisions with other Triangles
		//! True if tri intersects any triangle in this subtree.
		bool intersects(const typename TriMesh<FloatType>::Triangle* tri) const {
			if (boundingBox.intersects(tri->getV0().position, tri->getV1().position, tri->getV2().position)) {
				if (isLeaf()) {
					return tri->intersects(*leafTri);
				} else {
					return lChild->intersects(tri) || rChild->intersects(tri);
				}
			} else {
				return false;
			}
		}

		//! Same test, but tri's vertices are first mapped by transform; the
		//! transformed copy lives on the stack for the duration of the call.
		bool intersects(const typename TriMesh<FloatType>::Triangle* tri, const Matrix4x4<FloatType>& transform) const {
			typename TriMesh<FloatType>::Vertex v0(transform * tri->getV0().position);
			typename TriMesh<FloatType>::Vertex v1(transform * tri->getV1().position);
			typename TriMesh<FloatType>::Vertex v2(transform * tri->getV2().position);
			typename TriMesh<FloatType>::Triangle triTrans(&v0,&v1,&v2);
			if (boundingBox.intersects(triTrans.getV0().position, triTrans.getV1().position, triTrans.getV2().position)) {
				if (isLeaf()) {
					return triTrans.intersects(*leafTri);
				} else {
					return lChild->intersects(&triTrans) || rChild->intersects(&triTrans);
				}
			} else {
				return false;
			}
		}

		// collisions with other TriangleBVHNodes
		//! True if any triangle of this subtree intersects any of other's.
		//! On a leaf the roles flip: other's tree is searched for this leaf's
		//! triangle.
		bool intersects(const TriangleBVHNode& other) const {
			if (boundingBox.intersects(other.boundingBox)) {
				if (isLeaf()) {
					return other.intersects(leafTri);
				} else {
					return lChild->intersects(other) || rChild->intersects(other);
				}
			} else {
				return false;
			}
		}

		//! As above, with other's geometry mapped by transform.
		//! NOTE(review): transforming an axis-aligned box this way is only
		//! approximate; the inline TODO below already flags the missing OBB
		//! handling — confirm before relying on tight culling here.
		bool intersects(const TriangleBVHNode& other, const Matrix4x4<FloatType>& transform) const {
			if (boundingBox.intersects(other.boundingBox * transform)) { //TODO fix OBB
				if (isLeaf()) {
					return other.intersects(leafTri, transform.getInverse());
				} else {
					return lChild->intersects(other, transform) || rChild->intersects(other, transform);
				}
			} else {
				return false;
			}
		}

		//! Conservative test that stops at bounding boxes — never tests
		//! actual triangles, so it can report collisions that the exact
		//! queries above would reject.
		bool collisionBBoxOnly(const TriangleBVHNode& other, const Matrix4x4<FloatType>& transform) const {
			if (boundingBox.intersects(other.boundingBox * transform)) { //TODO fix OBB
				if (isLeaf()) {
					return true;
				} else {
					return lChild->collisionBBoxOnly(other, transform) || rChild->collisionBBoxOnly(other, transform);
				}
			} else {
				return false;
			}
		}

		//! Depth of this subtree counting this node (a leaf returns 1).
		unsigned int getTreeDepthRec() const {
			unsigned int maxDepth = 0;
			if (lChild) maxDepth = std::max(maxDepth, lChild->getTreeDepthRec());
			if (rChild) maxDepth = std::max(maxDepth, rChild->getTreeDepthRec());
			return maxDepth+1;
		}

		//! Total node count of this subtree, including this node.
		unsigned int getNumNodesRec() const {
			unsigned int numNodes = 1;
			if (lChild) numNodes += lChild->getNumNodesRec();
			if (rChild) numNodes += rChild->getNumNodesRec();
			return numNodes;
		}

		//! Number of leaves (== number of referenced triangles) below this
		//! node; asserts that every leaf actually carries a triangle.
		unsigned int getNumLeaves() const {
			unsigned int numLeaves = 0;
			if (lChild) numLeaves += lChild->getNumLeaves();
			if (rChild) numLeaves += rChild->getNumLeaves();
			if (!lChild && !rChild) {
				assert(leafTri);
				numLeaves++;
			}
			return numLeaves;
		}

		// Strict-weak orderings by triangle-center coordinate, used by the
		// stable_sort calls in the split routines and in buildParallel.
		static bool cmpX(typename TriMesh<FloatType>::Triangle *t0, typename TriMesh<FloatType>::Triangle *t1) {
			return t0->getCenter().x < t1->getCenter().x;
		}
		static bool cmpY(typename TriMesh<FloatType>::Triangle *t0, typename TriMesh<FloatType>::Triangle *t1) {
			return t0->getCenter().y < t1->getCenter().y;
		}
		static bool cmpZ(typename TriMesh<FloatType>::Triangle *t0, typename TriMesh<FloatType>::Triangle *t1) {
			return t0->getCenter().z < t1->getCenter().z;
		}
	};

	//! BVH-backed accelerator implementing the project's ray- and
	//! collision-accelerator interfaces for TriMesh. Owns m_Root and thus
	//! the whole tree; triangle pointers come from the base class.
	template <class FloatType>
	class TriMeshAcceleratorBVH : public TriMeshRayAccelerator<FloatType>, public TriMeshCollisionAccelerator<FloatType, TriMeshAcceleratorBVH<FloatType>>
	{
	public:
		TriMeshAcceleratorBVH() {
			m_Root = nullptr;
		}
		//! Builds the hierarchy immediately; build() and the meaning of
		//! storeLocalCopy are defined by the base interface (elsewhere).
		TriMeshAcceleratorBVH(const TriMesh<FloatType>& triMesh, bool storeLocalCopy = false) {
			m_Root = nullptr;
			build(triMesh, storeLocalCopy);
			//std::vector<const TriMesh<FloatType>*> meshes;
			//meshes.push_back(&triMesh);
			//build(meshes, true);
			//std::vector<std::pair<const TriMesh<FloatType>*, Matrix4x4<FloatType>>> meshes;
			//meshes.push_back(std::make_pair(&triMesh, Matrix4x4<FloatType>::identity()));
			//build(meshes);
		}
		~TriMeshAcceleratorBVH() {
			SAFE_DELETE(m_Root);
		}

		//! Prints tree statistics (triangle count, depth, node/leaf counts)
		//! to stdout.
		void printInfo() const {
			std::cout << "Info: TriangleBVHAccelerator build done ( " << TriMeshRayAccelerator<FloatType>::m_TrianglePointers.size() << " tris )" << std::endl;
			std::cout << "Info: Tree depth " << m_Root->getTreeDepthRec() << std::endl;
			std::cout << "Info: NumNodes " << m_Root->getNumNodesRec() << std::endl;
			std::cout << "Info: NumLeaves " << m_Root->getNumLeaves() << std::endl;
		}
	private:
		//! defined by the interface
		bool collisionInternal(const TriMeshAcceleratorBVH<FloatType>& other) const {
			return m_Root->intersects(*other.m_Root);
		}
		bool collisionTransformInternal(const TriMeshAcceleratorBVH<FloatType>& other, const Matrix4x4<FloatType>& transform) const {
			return m_Root->intersects(*other.m_Root, transform);
		}
		bool collisionTransformBBoxOnlyInternal(const TriMeshAcceleratorBVH<FloatType>& other, const Matrix4x4<FloatType>& transform) const {
			return m_Root->collisionBBoxOnly(*other.m_Root, transform);
		}

		//! defined by the interface
		//! Seeds t with tmax and u/v with max() so the root node's early-out
		//! check passes on the first call.
		const typename TriMesh<FloatType>::Triangle* intersectInternal(const Ray<FloatType>& r, FloatType& t, FloatType& u, FloatType& v, FloatType tmin = (FloatType)0, FloatType tmax = std::numeric_limits<FloatType>::max(), bool onlyFrontFaces = false) const {
			u = v = std::numeric_limits<FloatType>::max();
			t = tmax; //TODO MATTHIAS: probably we don't have to track tmax since t must always be smaller than the prev
			return m_Root->intersect(r, t, u, v, tmin, tmax, onlyFrontFaces);
		}

		//! defined by the interface
		//! Rebuilds the tree from the base class's triangle pointer list;
		//! the parallel builder is the hard-wired default.
		void buildInternal() {
			SAFE_DELETE(m_Root);
			bool useParallelBuild = true;
			if (useParallelBuild) {
				buildParallel(TriMeshRayAccelerator<FloatType>::m_TrianglePointers);
			} else {
				buildRecursive(TriMeshRayAccelerator<FloatType>::m_TrianglePointers);
			}
		}

		//! Breadth-first median build: each tree level sorts its node ranges
		//! in parallel (OpenMP) along a cycling axis — the level-synchronous
		//! counterpart of TriangleBVHNode::splitMedian.
		//! NOTE(review): all threads write `true` into the shared
		//! needFurtherSplitting flag without synchronization; every writer
		//! stores the same value, but confirm this is acceptable under the
		//! project's threading rules.
		void buildParallel(std::vector<typename TriMesh<FloatType>::Triangle*>& tris) {
			struct NodeEntry {
				size_t begin;  // first index into tris covered by this node
				size_t end;    // one past the last covered index
				TriangleBVHNode<FloatType> *node;
			};
			std::vector<NodeEntry> currLevel(1);
			m_Root = new TriangleBVHNode<FloatType>;
			currLevel[0].node = m_Root;
			currLevel[0].begin = 0;
			currLevel[0].end = tris.size();

			unsigned int lastSortAxis = 0;
			bool needFurtherSplitting = true;
			while(needFurtherSplitting) {
				needFurtherSplitting = false;
				std::vector<NodeEntry> nextLevel(currLevel.size()*2);
#pragma omp parallel for
				for (int i = 0; i < (int)std::min(currLevel.size(),tris.size()); i++) {
					const size_t begin = currLevel[i].begin;
					const size_t end = currLevel[i].end;
					if (end - begin > 1) {
						if (lastSortAxis == 0) std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpX);
						else if (lastSortAxis == 1) std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpY);
						else std::stable_sort(tris.begin()+begin, tris.begin()+end, TriangleBVHNode<FloatType>::cmpZ);
						TriangleBVHNode<FloatType>* node = currLevel[i].node;
						TriangleBVHNode<FloatType>* lChild = new TriangleBVHNode<FloatType>;
						TriangleBVHNode<FloatType>* rChild = new TriangleBVHNode<FloatType>;
						node->lChild = lChild;
						node->rChild = rChild;
						// split the range at the median element
						nextLevel[2*i+0].begin = begin;
						nextLevel[2*i+0].end = begin + ((end-begin)/2);
						nextLevel[2*i+1].begin = begin + ((end-begin)/2);
						nextLevel[2*i+1].end = end;
						nextLevel[2*i+0].node = currLevel[i].node->lChild;
						nextLevel[2*i+1].node = currLevel[i].node->rChild;
						// a half of size 1 becomes a leaf; larger halves
						// force another level of splitting
						if (nextLevel[2*i+0].end - nextLevel[2*i+0].begin < 2) lChild->leafTri = tris[nextLevel[2*i+0].begin];
						else needFurtherSplitting = true;
						if (nextLevel[2*i+1].end - nextLevel[2*i+1].begin < 2) rChild->leafTri = tris[nextLevel[2*i+1].begin];
						else needFurtherSplitting = true;
					}
				}
				if (needFurtherSplitting) {
					currLevel = nextLevel;
					lastSortAxis = (lastSortAxis+1)%3;
				}
			}
			m_Root->computeBoundingBox();
		}

		//! Sequential depth-first midpoint build (alternative path).
		//! NOTE(review): passing tris.begin()/tris.end() temporaries to
		//! splitMidPoint's non-const reference parameters compiles as an
		//! MSVC extension only — confirm the supported compiler set.
		void buildRecursive(std::vector<typename TriMesh<FloatType>::Triangle*>& tris) {
			assert(tris.size() > 2);
			m_Root = new TriangleBVHNode<FloatType>;
			//m_Root->splitMedian(tris.begin(), tris.end(), 0);
			m_Root->splitMidPoint(tris.begin(), tris.end());
			m_Root->computeBoundingBox();
		}

		//! private data
		TriangleBVHNode<FloatType>* m_Root;
	};

	typedef TriMeshAcceleratorBVH<float> TriMeshAcceleratorBVHf;
	typedef TriMeshAcceleratorBVH<double> TriMeshAcceleratorBVHd;

} // namespace ml

#endif
mpm_search_element_utility.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Bodhinanda Chandra // #ifndef KRATOS_MPM_SEARCH_ELEMENT_UTILITY #define KRATOS_MPM_SEARCH_ELEMENT_UTILITY // System includes // External includes // Project includes #include "includes/define.h" #include "utilities/binbased_fast_point_locator.h" #include "particle_mechanics_application_variables.h" namespace Kratos { namespace MPMSearchElementUtility { typedef std::size_t IndexType; typedef std::size_t SizeType; /** * @brief Search element connectivity for each particle * @details A search is performed to know in which grid element the material point falls. * If one or more material points fall in the grid element, the grid element is * set to be active and its connectivity is associated to the material point * element. * STEPS: * 1) All the elements are set to be INACTIVE * 2) A searching is performed and the grid elements which contain at least a MP are set to be ACTIVE * */ template<std::size_t TDim> void SearchElement(ModelPart& rBackgroundGridModelPart, ModelPart& rMPMModelPart, const std::size_t MaxNumberOfResults, const double Tolerance) { // Reset elements to inactive #pragma omp parallel for for(int i = 0; i < static_cast<int>(rBackgroundGridModelPart.Elements().size()); ++i){ auto element_itr = rBackgroundGridModelPart.Elements().begin() + i; auto& rGeom = element_itr->GetGeometry(); element_itr->Reset(ACTIVE); for (IndexType j=0; j < rGeom.PointsNumber(); ++j) rGeom[j].Reset(ACTIVE); } // Search background grid and make element active Vector N; const int max_result = 1000; #pragma omp parallel { BinBasedFastPointLocator<TDim> SearchStructure(rBackgroundGridModelPart); SearchStructure.UpdateSearchDatabase(); typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_result); // Element search and assign background grid #pragma omp for 
for(int i = 0; i < static_cast<int>(rMPMModelPart.Elements().size()); ++i){ auto element_itr = rMPMModelPart.Elements().begin() + i; const array_1d<double,3>& xg = element_itr->GetValue(MP_COORD); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelem; // FindPointOnMesh find the background element in which a given point falls and the relative shape functions bool is_found = SearchStructure.FindPointOnMesh(xg, N, pelem, result_begin, MaxNumberOfResults, Tolerance); if (is_found == true) { pelem->Set(ACTIVE); element_itr->GetGeometry() = pelem->GetGeometry(); auto& rGeom = element_itr->GetGeometry(); for (IndexType j=0; j < rGeom.PointsNumber(); ++j) rGeom[j].Set(ACTIVE); } else{ KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point: " << element_itr->Id() << " is failed. Geometry is cleared." << std::endl; element_itr->GetGeometry().clear(); element_itr->Reset(ACTIVE); element_itr->Set(TO_ERASE); } } // Condition search and assign background grid #pragma omp for for(int i = 0; i < static_cast<int>(rMPMModelPart.Conditions().size()); ++i){ auto condition_itr = rMPMModelPart.Conditions().begin() + i; if (condition_itr->Has(MPC_COORD)){ const array_1d<double,3>& xg = condition_itr->GetValue(MPC_COORD); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelem; // FindPointOnMesh find the background element in which a given point falls and the relative shape functions bool is_found = SearchStructure.FindPointOnMesh(xg, N, pelem, result_begin, MaxNumberOfResults, Tolerance); if (is_found == true) { pelem->Set(ACTIVE); condition_itr->GetGeometry() = pelem->GetGeometry(); auto& rGeom = condition_itr->GetGeometry(); for (IndexType j=0; j < rGeom.PointsNumber(); ++j) rGeom[j].Set(ACTIVE); } else{ KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point Condition: " << condition_itr->Id() << " is 
failed. Geometry is cleared." << std::endl; condition_itr->GetGeometry().clear(); condition_itr->Reset(ACTIVE); condition_itr->Set(TO_ERASE); } } } } } } // end namespace MPMSearchElementUtility } // end namespace Kratos #endif // KRATOS_MPM_SEARCH_ELEMENT_UTILITY
irbuilder_for_unsigned_dynamic_chunked.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@workshareloop_unsigned_dynamic_chunked( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 33, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] 
= load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: store i32 1, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: store i32 %[[DOTCOUNT]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_dispatch_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 35, i32 1, i32 %[[DOTCOUNT]], i32 1, i32 5) // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER_OUTER_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_HEADER:.*]]: // CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ %[[LB:.+]], %[[OMP_LOOP_PREHEADER_OUTER_COND]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_COND]]: // CHECK-NEXT: %[[UB:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[UB]] // CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_PREHEADER_OUTER_COND]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP4]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = zext i32 %[[TMP7]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, 
float* %[[TMP6]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]] // CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = zext i32 %[[TMP10]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]] // CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = zext i32 %[[TMP13]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_INC]]: // CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1 // CHECK-NEXT: br label %[[OMP_LOOP_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_EXIT:.*]]: // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]]) // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER_OUTER_COND]]: // CHECK-NEXT: %[[TMP14:.+]] = call i32 @__kmpc_dispatch_next_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]]) // CHECK-NEXT: %[[TMP15:.+]] = icmp ne i32 %[[TMP14]], 0 // CHECK-NEXT: %[[TMP16:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[LB]] = sub i32 %[[TMP16]], 1 
// CHECK-NEXT: br i1 %[[TMP15]], label %[[OMP_LOOP_HEADER]], label %[[OMP_LOOP_EXIT]] // CHECK-NEXT: } extern "C" void workshareloop_unsigned_dynamic_chunked(float *a, float *b, float *c, float *d) { #pragma omp for schedule(dynamic, 5) for (unsigned i = 33; i < 32000000; i += 7) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: store i32 32000000, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 7, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp ult i32 %[[TMP4]], %[[TMP5]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = 
sub i32 %[[TMP8]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 7, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45} // CHECK: ![[META2:[0-9]+]] =
cq_fmt_plug.c
/* * This software is Copyright (c) Peter Kasza <peter.kasza at itinsight.hu>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_cq; #elif FMT_REGISTERS_H john_register_one(&fmt_cq); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 256 // core i7 no HT #endif #endif #include "arch.h" #include "misc.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "cq" #define FORMAT_NAME "ClearQuest" #define FORMAT_TAG "$cq$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "CQWeb" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define SALT_SIZE 64 // XXX double check this #define SALT_ALIGN MEM_ALIGN_NONE #define BINARY_SIZE 4 #define BINARY_ALIGN sizeof(uint32_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 512 static struct fmt_tests cq_tests[] = { {"$cq$admin$a9db7ca6", ""}, {"$cq$admin$10200218", "admin"}, {"$cq$admin$4cfb73f2", "password"}, {"$cq$clearquest$a279b184", "clearquest"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)]; static char saved_salt[SALT_SIZE]; unsigned int AdRandomNumbers[2048] = { 0x7aa03f9e, 0x2be5e9c7, 0x1b5ceb7b, 0x32243048, 0x3cb12e04, 0xe90d2e8f, 0xace8842a, 0xdbc021e2, 0xdb7e4414, 0x9414168d, 0xec94d186, 0xb0d45b52, 0xefa8a505, 0xee4ac734, 0xee7f3583, 0xf37a1bd0, 0x258cd1c7, 0xa93a5bf7, 0x347a23f6, 0xbf68b272, 0x5c89e744, 0x4faa8fc0, 0x54fa4bc1, 0x8f4db7cc, 0xcc78a54c, 0x84012379, 0xbb997725, 0x234612c5, 0x9b8a7120, 0xa2ea15c9, 0xa1b03515, 0xd68c512c, 0x90cbbcb0, 0xa677c1d3, 0xad4edf73, 0x0fbb4c6c, 0x7a70637d, 0x920c3ef4, 0xc31f9edf, 0xc0c29c40, 0xe0547468, 0x8af5778a, 0x4910f9e4, 0x553744df, 0xe9e5d82f, 0x9f648de3, 
0xc366b6e0, 0xd2b1f83d, 0xca04733f, 0xcf3603b1, 0x27a7e70f, 0x9981f60a, 0xdd4e2cf8, 0xbcb811b3, 0x5d74ecc1, 0xa48e7106, 0x16539a54, 0xbdce7967, 0xf08c13d2, 0x2698c222, 0x3343d62d, 0xebf68da7, 0xc2bc9948, 0xea757b40, 0x37f0de67, 0xa03a4d89, 0x2cfc3cb7, 0x89230393, 0x6711f91d, 0x3812fb31, 0x9a105ad2, 0xf713d767, 0x4bd0d6c5, 0x4e4f9760, 0x9144e67b, 0x2b5b1540, 0xed6f8cd3, 0x1ab6de1d, 0x94fd67ae, 0x5fec29f0, 0x9d2f6c66, 0x701797be, 0x7ecd184e, 0x0e58eba6, 0x189fd557, 0xe70a447f, 0x3140d5ce, 0x46d55d28, 0xf77cb54a, 0x2eb11f3e, 0x331059a8, 0x1bcb4f0e, 0x253b1132, 0x9ed96195, 0xfadee553, 0x60939883, 0x76ea1724, 0x64ad91a9, 0xce605783, 0x5b17c228, 0xbb45f357, 0xee888899, 0xe36bda7e, 0xb1e6fdb6, 0xd47b22cb, 0xba2b4b6f, 0x31eb5d46, 0x72865ff5, 0xecc9077f, 0xe59c7a9a, 0xe2484234, 0x6c250954, 0x75bae5d1, 0x7d8ffc5c, 0x14aa2302, 0x0ea92748, 0xfc9fe3aa, 0x28295969, 0x2f38760d, 0xd335cab1, 0x7ffcaead, 0xcdeb00fc, 0x5aa4766f, 0xc57deab9, 0xfbeb9ac4, 0xfc59f737, 0xdc8a3d1d, 0xd5a04c8a, 0x63afc85c, 0x5a062866, 0x7feadb01, 0x9d29586e, 0x4bc63a4e, 0xc2e7a653, 0x80d132b7, 0x306d04e9, 0x27b4e1df, 0x2d4e5f39, 0xbc7f2f05, 0x8efb7bc4, 0x834368e0, 0xe37ac5c2, 0x07f7741c, 0x7d70f32b, 0xd8fc568c, 0x52e86793, 0x8a95993e, 0xc086bd26, 0xa0d0c8f9, 0x2262519a, 0xc7a79adc, 0x658ee58a, 0x94376223, 0x631fbdc3, 0x3433f9b0, 0xe469e054, 0x7b187772, 0x6ea94b34, 0x88515df2, 0xaa7c0e2b, 0xd688308e, 0x58f4d8de, 0xb3df5108, 0xa977f53d, 0x2f8bf273, 0x98c61997, 0x4c4f7cbc, 0x81a5a959, 0x8b38b887, 0x17c0d440, 0x03459240, 0x0e049148, 0xfa28dda2, 0xa364ab5b, 0x1f779ce8, 0x9013690f, 0xbf9b1284, 0x7a1704ac, 0x9deb5e71, 0x1b97bc67, 0x8560a7c3, 0xf95dc105, 0x5be1c16f, 0xa6f93b31, 0x9d540073, 0xbe36f269, 0xec1ad083, 0xb0d82bf0, 0xa362120c, 0x5d40e591, 0x4383ee00, 0xfc2bd2b3, 0x03415c36, 0x3514e8f4, 0x39fce373, 0xd8b0083c, 0xfaaf9375, 0xad0cf95b, 0x7618f7bd, 0xa1172c4f, 0xe4d6fc97, 0xd0d7d41c, 0x665d7461, 0x05a9ffd1, 0xdd7380d8, 0x2c334bab, 0xe89378a6, 0xe1b5a151, 0xb3d52dec, 0x3298d083, 0x2ef54a8c, 0x06f48c54, 
0x34172d75, 0xee7a4a5b, 0x0bcbb2c4, 0xedf58b49, 0x8bbc49f3, 0x3c5c2876, 0xf8faa9d2, 0xd5c27925, 0x8b31e871, 0xce021186, 0xff86fbb1, 0xce65d1a6, 0x2ddc62f1, 0x4be585e1, 0x69554205, 0x4a4144f0, 0xd8658572, 0x7ecdf4e5, 0xeca38dba, 0xba099ee0, 0x9b776c4e, 0x406aa6c7, 0x1b2920d2, 0x20a4fe17, 0x7eefab23, 0xf6cedf61, 0x580739bc, 0xdb0d8ee2, 0xbcceef67, 0xbfeaeedf, 0x396c4060, 0xccb400cc, 0x2e411cbb, 0x671672e1, 0x25a6139b, 0x9ee52aa1, 0x7494b0f1, 0xd967d261, 0x7c3351f6, 0xcb1b564c, 0x3ebd0cbf, 0x1e451dcb, 0x197de7e2, 0xb988e765, 0x4a56963b, 0x1c1c8571, 0xc77711b8, 0x56a92f0e, 0xb480334e, 0x283314d6, 0xa2905c9a, 0xfb7a96f1, 0x033f43a9, 0xea71059a, 0x266c4710, 0x4528c417, 0x6f3a6608, 0x45699d70, 0xd72db23a, 0x411dfdeb, 0xed52ad3d, 0x9c1cd588, 0x15e8e29d, 0xe36911aa, 0x2e65957b, 0x085d2e03, 0x419ee083, 0x0b257fb9, 0x30aa0a12, 0xdc7358a7, 0xaa9d75b8, 0xcc040ac1, 0x178035c6, 0x53916d0e, 0x54f00e79, 0xf5de4034, 0xd66ff28b, 0x77fd3f74, 0x512ca7d5, 0xd6e48b64, 0xbeb2e6e6, 0xfbbc9e4d, 0xfae7f661, 0x8d529091, 0xd43abeab, 0xc445eab2, 0x31c276ea, 0x3b02aa3c, 0x104555ae, 0xf31fca4c, 0x7c86bda3, 0xbb8fa8e1, 0x561ef23b, 0xc7c3ddac, 0xc049a3b2, 0xb55ce3c1, 0x793c8199, 0xaf9cf13b, 0xd1bedce3, 0x5c9d8d47, 0x975973cd, 0xcc747711, 0xecc9b8cd, 0xc38edb0e, 0x10d46466, 0xd1742665, 0x34de2abc, 0x6af0415f, 0x33eaabe7, 0xeb5b3c88, 0xe45dbb41, 0x46a60558, 0xec3ee7fb, 0x8648812f, 0x779004e2, 0x957369ad, 0x2859ec9e, 0xd7500913, 0x7ffdbaff, 0x653c7581, 0xcb2fbe4e, 0x889a0521, 0xc2c47e53, 0x27357d53, 0x6f8e3752, 0x14fed547, 0x78f92e55, 0x12168b52, 0x0cecbc88, 0xadc37f82, 0x5c65b902, 0x464adda3, 0x51701de4, 0x94f581be, 0x3a8bfe19, 0xd928e9fc, 0x4e621fd7, 0x463c7b1a, 0xa5d610c6, 0x5225860f, 0x31f71d52, 0x59da3e38, 0xf979401f, 0x457831b6, 0x19b19ff9, 0x212898b8, 0x9d9a330d, 0x9cbbc514, 0xb1b28903, 0x92161213, 0xcd13324f, 0xa7d072ff, 0x314eb9e8, 0xbf0981ce, 0x00212756, 0xa4aa1438, 0x4b8a6088, 0x35cfaec8, 0xdd1def33, 0x7a868772, 0x43faf1be, 0x7140cda5, 0x86a8ccf2, 0xaa95fc06, 0x19eaaee0, 0x309e6b2a, 
0x6943426b, 0x2c687aca, 0x9aa1e0dd, 0x5bf701cd, 0x129364b8, 0xeac81cbf, 0x3a1193ec, 0xd224cb9d, 0x0bbd5135, 0x82eb9c9d, 0x3c965dbf, 0xf6779412, 0xc71f63da, 0x28eedaa1, 0x63a4f5ba, 0x07cdd4e0, 0x072faeff, 0x21ff534d, 0x0d35cc3f, 0x4e0d4e2e, 0x75851a2f, 0x2e6779f2, 0xda7a104a, 0xbf129c30, 0x8b6305e9, 0x8d5bd156, 0xc3143454, 0x01f988a1, 0x55930029, 0xfc2f1619, 0xc4f9f598, 0x60c2ca05, 0x0c7138d9, 0xb3a34e45, 0x2f228b1a, 0x8ce79f7b, 0x0433eefb, 0x0bba45cc, 0xd9b8abdc, 0x77798336, 0xd46b5757, 0xf3f4308a, 0x8e4984c1, 0x18d3dff2, 0x43cd6cbb, 0x475cab00, 0xa0baa888, 0x7d688498, 0x33975596, 0xa8e3b619, 0x8258d065, 0x1e939418, 0xae47dcaf, 0x56a1311b, 0x41650078, 0x9ca8280b, 0x2df5bb50, 0x8ca64fba, 0x840e8608, 0x0ea9643d, 0x64666095, 0x76ae3ba8, 0x31409202, 0x5c076878, 0xa15154f5, 0xa33ad25c, 0x5d76ea5e, 0x89d98e0c, 0x7d0888c2, 0xce8ea846, 0x5422cb9c, 0x4ed2233e, 0x7a956e89, 0x9f132d5d, 0xb21ecaf1, 0xaae3eeb0, 0x5b6ca2c8, 0x4dc9ee83, 0x970b1474, 0x990ccca3, 0x223acb1d, 0x8136038e, 0xd3f3f287, 0x869f34e7, 0x11966837, 0x9952dab8, 0xdb3077d8, 0x9fe3cdd8, 0x892915cf, 0x658ae998, 0x6fa07e4a, 0xe28746ed, 0xa7821e30, 0x59d88b18, 0x1d1a7922, 0xe19a82ec, 0xd7e17b1b, 0x4d77aaee, 0x954c1f25, 0xad312a39, 0xb2fc818a, 0x29d17df3, 0x498f9862, 0xbba03635, 0xa4be43d1, 0x712e234c, 0xee3d9921, 0x87d1e33d, 0x8606d3cf, 0xc252595e, 0x6dd206de, 0xd381f5bb, 0xc0d9420f, 0x459e1991, 0x0c1892b8, 0xc79fd935, 0x416cdabe, 0xb57690a6, 0x087ea862, 0x763aff63, 0x15337abd, 0x6455d3f6, 0x524034c7, 0x7bb42336, 0x41669b3f, 0x26574372, 0xbc949e2e, 0x211af8fb, 0x7416c18e, 0x6c7c01c8, 0x20edce1e, 0xf858ab54, 0x2cde56af, 0xd4a31e32, 0xb6947234, 0xa8694d27, 0x90f90227, 0x257413b1, 0xacd234a4, 0x17e19de7, 0x71b63409, 0x8d67fb39, 0x0217d598, 0xcb548c2d, 0xce3c13da, 0x2d6c3984, 0xcc3c93db, 0x6d10ae7c, 0x893b3eb1, 0xcf58d7d6, 0x8038f673, 0xf5d60b09, 0x85c6ea1c, 0xbe121052, 0x4519c0cd, 0xe4032569, 0xc0a8e700, 0x2a72e706, 0x8534a64f, 0x9f4a1361, 0xd4ef416b, 0x43336420, 0x323e2a96, 0xd5ea02c9, 0x59782e1b, 0x4c4e02e2, 
0x75171d80, 0x6aa8fba1, 0xfdc8233c, 0x36eee3c0, 0x18643046, 0xce2d3fe4, 0x73ed0154, 0xfc8dfea4, 0xd43c9f90, 0x2bbdf9fa, 0x0dd43b40, 0x74c54101, 0xd4dd41ed, 0xf06a79a5, 0xb85687ae, 0x5f85dfbd, 0xd630cc07, 0xd7105cd7, 0x46f30505, 0x15575a70, 0x1b3dff40, 0x24a9e552, 0xcc52f6c4, 0xddca62fe, 0x094da898, 0xbbf990e8, 0xd2531586, 0x79a6abe4, 0xd67bac65, 0xaf2e08a7, 0x36c28310, 0x85f3fb0a, 0x188a8332, 0x7ed37c9c, 0x9fd8d6c6, 0xba87d2eb, 0x0331d15f, 0x11438530, 0x86848311, 0x81b555f1, 0xf582213f, 0x1b39290e, 0x644ed913, 0x1a15eef2, 0x30df2149, 0x9aee5345, 0xdaf33a78, 0x1a36f066, 0xbc6d937e, 0xf0553748, 0x3c4d51a4, 0xac14c751, 0xc691eb3a, 0xd08c9cde, 0x93a2716c, 0xd46bddb8, 0x86504249, 0xd331a584, 0xbb0a34b0, 0xe095b710, 0x637512a4, 0x1b4b520d, 0xd51e5b11, 0x561b6ea5, 0xf1d870ab, 0xed478d20, 0x63e61e6d, 0xf159f48a, 0xc5e9d72c, 0x5c5ce2e0, 0x65f1e7a6, 0x3d98331a, 0x54596f93, 0x98a1fcef, 0xd603f018, 0xdf0a0fe9, 0x3133cc21, 0x1f66fa5a, 0x34d24f08, 0x3f6ffbdc, 0x8f5a7faf, 0xb6f7f344, 0x44e8f0fc, 0xb84d7ede, 0x6a193aa5, 0xd7846b26, 0xdc93b0fe, 0x8eb61507, 0x57502a82, 0xd0068b11, 0xbfc1a056, 0x5766badc, 0xcd80e8cb, 0x3ad80ff6, 0x6b07c122, 0x2fc821a0, 0x924b7f93, 0x0cb37099, 0x8f078060, 0x74ea3ad4, 0x12006e5d, 0x513db72f, 0x9ed5dc6d, 0x0f8763e1, 0xd3746c15, 0x69043f07, 0x2f1f0e29, 0x1e134f46, 0x1b673ace, 0x352869fa, 0xb903f872, 0xf44e83a9, 0x0358ba50, 0xad94f27d, 0xe3799bc7, 0x1aa584be, 0xbf6496e8, 0x020edea1, 0x8f9f1e56, 0x335d8aad, 0xb52f8536, 0x1047d008, 0x705fc7c5, 0x82a15021, 0x38ec73c5, 0x83bbeec3, 0xafe5dfad, 0x014d9399, 0x132c1fdf, 0x90781f7f, 0x9fdf14ca, 0x4f9e619e, 0xcffb2139, 0xd868773d, 0x53ab6332, 0xc7139a87, 0x069c6fa1, 0xaea8ddc8, 0xebf10a04, 0x594a74b2, 0xaf80e4a8, 0x4ea207a5, 0x0017f466, 0x780f7b67, 0x35b7e34c, 0xd5c9b942, 0x55096faf, 0xf46db3d1, 0x4ab94cc8, 0x2b69bb09, 0xd95f0cbc, 0x97694c99, 0xd1cfe216, 0xb21e227d, 0xea436ce4, 0xbd06f5f2, 0x69b7ed73, 0xdc368c1b, 0x6bc96d16, 0x00af13db, 0x33bac433, 0xe3b28392, 0x985ce7fa, 0x49981994, 0x3acd1335, 0x8a72de1d, 
0x41500863, 0xa2eba987, 0x1928c7d1, 0xa599918a, 0x9b8ff4d9, 0x698b190b, 0xfdb34c56, 0x53fb5413, 0x4879c088, 0x3bef71a7, 0x54f32abc, 0xe3fe3908, 0xa8b79d24, 0x50f69baf, 0xe87713e1, 0xceeaa849, 0xb5054c6e, 0x99076ad6, 0x4cecff0b, 0x21880bcb, 0x78b035b6, 0x2ba89560, 0x82d35f09, 0xeb2456ed, 0xc404c4b3, 0xd88f0c49, 0xd07cb42f, 0x37c1d01d, 0xb81701af, 0x1b435610, 0x6945f3b8, 0xd1d51325, 0x1b89be74, 0x67fb8124, 0x48f5ee8e, 0x80181ba7, 0x10817a0c, 0x931890cb, 0xdd21311d, 0x8530cda9, 0x8e31ca20, 0x3bc790ba, 0xbb769f75, 0xd87d8134, 0x1e586f4e, 0x0afef375, 0x0756d640, 0x7e89338e, 0x7ffd1904, 0x83dbd0c7, 0xcfd2f15b, 0xfd2675d6, 0xab8cd735, 0x6c95ef68, 0x4e713995, 0x90835487, 0x193af1c9, 0xfe13ce8f, 0x82beba7f, 0x8a9c42b3, 0x44635fa6, 0xb5b71efc, 0x12b4e48a, 0xf7af1338, 0xd1ac39a8, 0xbacc03c1, 0xf24f7267, 0x8e0f9e90, 0x189cabb4, 0x3c8019a1, 0x4abecfee, 0x9d972bc8, 0xf5e931b0, 0xc26b7de1, 0x1d56f5d3, 0x77079560, 0x04ae0fac, 0x03220a78, 0xe0b95f42, 0x92d112ce, 0x2bd2afad, 0x80755047, 0x41a23ec9, 0xb49f602c, 0x3761a3f6, 0x935ee1cf, 0x1b64abe7, 0x006e19a9, 0x0b720985, 0xd8155ad3, 0x15539fef, 0xe2a32706, 0x86ac3c63, 0x5b606e14, 0x529e4a4e, 0x5d862f33, 0x68db23a7, 0x276819b7, 0x8c1e1a2d, 0xb8bdd15a, 0x7d0eb98e, 0xd3301dee, 0xeddd8b74, 0xb73dad26, 0xded6a141, 0x1f233c79, 0x324da954, 0x3eb44363, 0x530f5631, 0x60aa4163, 0xa0a53376, 0x46447217, 0x21c4a239, 0xf2813423, 0x72ec278c, 0x87b8bf19, 0x630ff66d, 0xd83cf3a0, 0x9f889ff6, 0xb99fc512, 0xce97a6ac, 0x018016d3, 0xb6c29965, 0xc06b4718, 0x837bdedb, 0x4cf09f8e, 0xe9dc4763, 0x7696df71, 0x03182a80, 0x4437d055, 0xea83a340, 0x749af1a9, 0xab377e67, 0x9c568a33, 0x15a7cce4, 0x7a0a58e1, 0xfa221945, 0x55bf880c, 0x31607015, 0xb3780687, 0x5cb9bd63, 0xfec69f6b, 0xd2e21926, 0x8b3dad02, 0x4f3b41bc, 0x75e0e8cc, 0x10589810, 0x5729ba38, 0xb0a588e1, 0x7c821c0a, 0xbc0075a0, 0x3ab20949, 0xaff6b176, 0xc8497f0b, 0x1871d8dc, 0x1ff4a370, 0x824bcf3b, 0x6eca58a6, 0xa009169f, 0xf610f928, 0x23a867df, 0x383522b8, 0xc5f83a93, 0x2c3cd773, 0x55dbf231, 0x7c64920b, 
0x5ef0d8fa, 0x31db2bfe, 0x45439b02, 0x230e546f, 0xc48477fc, 0x7e83db3c, 0x989f9765, 0x36d109b3, 0x22d9ae17, 0x5b19aa54, 0xec1161f1, 0x4abd5b16, 0xf3264a50, 0x0488b52e, 0x3e0fba91, 0xd69b929c, 0x083c42ee, 0x4c37fc2d, 0xada375ec, 0xf4f80d69, 0xe645063a, 0xa9aa2bf5, 0xfb3169fb, 0xc2a162ab, 0x794a6f5b, 0x68838c2c, 0xf40b2c40, 0x4ef814f1, 0x4f847bc0, 0x5333a90e, 0x9b3f17a3, 0xa9af089f, 0xe1991375, 0x3f7384b6, 0x1fb942c7, 0x97365bba, 0xd3b6523a, 0x2d76c616, 0x5d7b944c, 0x9dffc0d9, 0x6ef0ca09, 0x745d606e, 0xc25f0d1d, 0x68335b59, 0xa5a9aa32, 0x5f609d6f, 0x58012e9c, 0xc138c974, 0x0c62589a, 0x54c7026b, 0x78576d4f, 0x7ab860fe, 0x673810e2, 0x3cd73808, 0xa20364a5, 0xc750409a, 0xaff51879, 0xa4a598b0, 0xc4f113e9, 0x28da785e, 0xf41e4752, 0xf0608ba0, 0xa3434cf2, 0x5591372b, 0x4aa08a13, 0x0ec92e82, 0x714a0880, 0x643b37f1, 0x8a2bb7a2, 0x7db8db33, 0x6f800d7f, 0xce583d48, 0x07517baf, 0xdc1d9e7d, 0xab9a13cd, 0x6f2614d0, 0xbf021e62, 0xbbd113d2, 0x426f03a7, 0xcd1cd97e, 0x321886e0, 0x894b01a2, 0x6ef2e9aa, 0x4aa8b1e0, 0xc2c2ff9a, 0x37a2d9a6, 0x3e4b059b, 0x4635f62b, 0x0ea4b349, 0x4bf910ea, 0x24d44dae, 0xc60dba2b, 0x017df1de, 0x7d7e20c1, 0x6db4c345, 0xfa704510, 0xcf23dded, 0x0c6f7db9, 0x863aea75, 0x2266ac92, 0x6434e1ec, 0xbd1f2c9f, 0x3a1f0877, 0x45c04d11, 0xc8c0a4de, 0x435beef5, 0x02c271e8, 0x86b845ba, 0x641c1fee, 0x33c0762f, 0x79d746f3, 0x2071528a, 0xd96a62b5, 0x6457d349, 0xbcf4079c, 0xb97d7c3f, 0x7bd9b37c, 0x8d22cb6d, 0xd108173d, 0x0d681c0c, 0xcf551b4f, 0x2c39510d, 0x9ebc64c9, 0x1ef42072, 0x12508fce, 0xb08f1e43, 0xe189dbf9, 0x210af02d, 0xce1ada8a, 0x5843de7d, 0xc8f2eb59, 0x97ebfc1f, 0xde0b9b2a, 0xc7ca01e9, 0x0a605020, 0x9d3265f2, 0x0ad04e2b, 0x3d203fe2, 0xb1cb3883, 0x7e3ee25a, 0x02483609, 0xac38260f, 0x6d028d74, 0xdffeb968, 0x1c1d8074, 0x53be10b2, 0x184ed1cb, 0x4e0bc9e4, 0x7d600a82, 0xd5efa6f7, 0xbe35cb9d, 0xe7aab6ff, 0xd24efe88, 0x4f95ee51, 0x531a720f, 0x9281a4d1, 0x349fe52e, 0x0372e4fb, 0xc0f4e1fe, 0xd22ca046, 0x7ad0d314, 0xb1917a84, 0x65ddfdc8, 0xf9e7d7f2, 0x731c3ae6, 0x3ccf4730, 
0x97503482, 0x9a75a545, 0xf3713091, 0x7074223f, 0x5ac2d900, 0xbb5bbda3, 0xf3ac9fe5, 0xc5f432f7, 0xc06ba069, 0x59d175ee, 0xf6624a20, 0x5ad27321, 0x44ec92f7, 0x5cdcda3c, 0xe4bfbaae, 0x61a61fdb, 0xeb78d322, 0x8a225c70, 0x5af8aa8e, 0x98dfebfa, 0x5e09f08a, 0x955bc9fa, 0xaf28b29a, 0x9795588a, 0x3a880cb4, 0xacfb27cb, 0xfadfac71, 0x26f58cd2, 0x8a9a8481, 0x5c73cf8f, 0x0a764745, 0x8e7525c6, 0x5a48f4bd, 0x9a038d74, 0xbe052e79, 0x50b8ab7b, 0x847b3fd6, 0xfd7af126, 0x910b5700, 0x89f74a62, 0xeacb449c, 0xba8ab0ec, 0xfb0d55df, 0xc9000ded, 0xa0e4fdc0, 0xd7fdbcbc, 0x776ad910, 0xf8ebe464, 0xb78641a8, 0x9e19f911, 0xa39f3c7f, 0x28798682, 0x770c0c67, 0xd6618589, 0x3824cfb9, 0x284362cc, 0x6f801c7b, 0x44473f26, 0xeb97128c, 0x9f5e29f8, 0xae790aec, 0xda7ce4f2, 0x2c37baad, 0xe0733abb, 0x2f79c97c, 0x56f014f5, 0x436f9427, 0x3fa2c41e, 0x03f4016e, 0xf29583a0, 0xdaa06497, 0xaef999c4, 0xde6098ea, 0x49d744e4, 0x9ac363c4, 0x71b11295, 0x1056e853, 0xa50f1c79, 0xf58aee75, 0xd2d98a60, 0xcc086a21, 0x633306d2, 0x43516bca, 0xf24c6958, 0x75bf4c20, 0xd1aa09ce, 0xd98701cb, 0x6497d417, 0xf09334da, 0x74005f98, 0x72241183, 0x3b7be9f5, 0xf4e1cf5d, 0xb44fe087, 0x9b347ba7, 0xb662e5db, 0xaf570647, 0x1b3d5eac, 0xf2d33276, 0xf694fa88, 0xfbf7ee8c, 0x722c4ccd, 0xb164605e, 0xfa22d883, 0x716dbb44, 0x00289aec, 0xfeb07061, 0x82f6934c, 0x7b9f20ae, 0x3d2832b2, 0x2127f401, 0x7e614a69, 0xb48b17be, 0x1e9061e2, 0xcdacbff9, 0x29519f11, 0xeb31bcdb, 0xc6a9a031, 0xa1fc0693, 0xdaaacb3d, 0x00177418, 0xd3b045d4, 0x575792d3, 0x7cc871a9, 0x4107f556, 0xd5dd7111, 0xecb8a31b, 0x88f6384e, 0x3e9559d2, 0xb87eab52, 0xddc8d45c, 0xcee4cadb, 0xb9672b94, 0x786129c5, 0x8a238446, 0x5cc9ce36, 0x897b89a3, 0x248f233f, 0x93a01697, 0xa9e32583, 0x46318359, 0xb60bd733, 0xcb344995, 0x93fe4889, 0x60477ed8, 0xe0baae66, 0x6e5f97b1, 0x36c221f3, 0xdecfa578, 0x45a9e7a5, 0xee38c8e3, 0x94209438, 0x0850dc00, 0x21d4792c, 0x0bff88c7, 0xfa703b77, 0x30ce06ab, 0x77bd6475, 0x07909087, 0xc8d5076a, 0x613d19bb, 0xc50da2d5, 0x77314616, 0x10f1ca59, 0xf5ed6e4e, 0x98132237, 
0xa67d0346, 0xd17c5af7, 0xe49b092a, 0x2c6ac00a, 0xd6a18085, 0xa608ec39, 0x6f93259e, 0xfcd1c805, 0x119aa599, 0xee224e6a, 0xcd42adfc, 0x5c62cac2, 0x62739935, 0xbb2899f7, 0x3f708519, 0x6cbf5af7, 0x23dab7fb, 0x4c335483, 0x30835257, 0xcb83d782, 0x60495aba, 0x9631c6ff, 0xcdab4568, 0xa9ddb16e, 0x41f50aca, 0x12347371, 0x8d2d523a, 0xf5106c41, 0xb257c349, 0x8da2838f, 0xf1dd89f0, 0xac2752a4, 0x55ea2767, 0x825e66f9, 0x70249c8e, 0x5c98333a, 0xb3bf59c7, 0x4600b593, 0xdb32d803, 0xa009b1a3, 0x573c455f, 0x93c9610b, 0x0f076b69, 0x362b989e, 0x31d3ecfc, 0x86f3c663, 0x4b7db83c, 0x1333370e, 0xef093b3c, 0x8fbfc042, 0x1299de1d, 0xb59f15e9, 0x70a306d4, 0xcdd49b7f, 0xe0e78ac4, 0x3c327e27, 0x88f19b26, 0x0e979bc4, 0xedffe30f, 0x4f5001a8, 0xc4f15293, 0xd622c7ff, 0x51effe17, 0x50bba6a6, 0xce0d506b, 0xb7652789, 0xc41720af, 0x53e907a8, 0x616fa770, 0x5d033835, 0x1039b042, 0x61e5b922, 0xfe81924a, 0xf355025c, 0x6dc5a3f1, 0x3c8f6e63, 0x5b5fe252, 0xa8b6f433, 0xac6fbfa2, 0xf6b0e5b9, 0x20f5b0b4, 0x70f68390, 0x61ad174c, 0x2d359dd8, 0xa4ad7281, 0x0e00eee7, 0x79abb5de, 0x3405bcf7, 0x581d66b4, 0x0279f078, 0xd38722d7, 0x247fe970, 0x3c031225, 0xda242c14, 0x2a2b7ae6, 0x218cef99, 0x4005f76a, 0x1ef3e30a, 0x03b957a3, 0x5f4bc0bb, 0x682bc52e, 0xfd640d9d, 0xbddf1fc4, 0xd8e6d767, 0x69b15d06, 0x5f62cd28, 0x7119ada8, 0x1c0b776a, 0x59bd0273, 0xf6a6a315, 0x62b75968, 0x3f216017, 0x10cb81c2, 0x055266ff, 0x7e4bab81, 0x4959ee06, 0x60876e87, 0x5d6ed71a, 0x8cf8c0fd, 0x7df4d611, 0x3cc63f3f, 0x2fb7033e, 0x26154a95, 0x34057009, 0x822e5c53, 0x8867b292, 0x2d784f05, 0x3e9e7ea3, 0x760d9500, 0x780a7bc4, 0xd36732b2, 0xf36d5b98, 0x20c71e2b, 0x2bc8e9a7, 0x096be252, 0xb9b9236b, 0x8071dfd4, 0xacfc1e15, 0xcbbde7c9, 0x5f33b8f0, 0xbb63b209, 0x551eb7d6, 0x113e505f, 0xb076c142, 0x43044856, 0xaf552da0, 0xd921227b, 0x10817109, 0xdf903cff, 0x1419a111, 0x620ee003, 0xb614f2da, 0x321961d0, 0x650e8b24, 0xd5b909be, 0x79c7fbf6, 0x5849e0b0, 0x9342c0ba, 0x0fbf4e5e, 0xcd01430f, 0xd80f501a, 0xcbde9f45, 0xd1f7c565, 0x20fd41c4, 0x5fef8bb7, 0x755e7eca, 
0x611c1ad5, 0x3e40beb2, 0x9e864b82, 0x69973554, 0x1d1d7c51, 0x3541f0a5, 0xa2aef7cd, 0xafa3c0e1, 0x1140c1b7, 0x82f9c5fd, 0x0e53bda8, 0xeb48c46a, 0xe6e5f766, 0x686f4800, 0x84ff9918, 0x3a299c37, 0x7c3a324f, 0x25295743, 0xb9131e22, 0xa9467eff, 0x88bd0285, 0xfe7b56c4, 0x3774cab6, 0x8b709b30, 0xe98eb68f, 0xf99ed1ae, 0x21c8eda6, 0x8ceb5ee2, 0x24474545, 0x8709e6f9, 0x72b0e80f, 0x66d9c7f5, 0xe2ca210b, 0xdfbd7e43, 0xc9c5335c, 0x68b085ce, 0x021f3760, 0x2805cab4, 0xc2817224, 0x33054445, 0x05fc88f5, 0x4ef25d45, 0x1aed0636, 0xcaa0b185, 0xa8eb115b, 0x595bde5a, 0x7772b19b, 0x4dd2ca0d, 0xfc5c2185, 0x78c831dc, 0x4ffef944, 0x78430100, 0x03850200, 0x58c49038, 0x5aeee65f, 0x818f6b1d, 0x21ac1b4b, 0xecf90ec1, 0xfc678952, 0xe8340c7c, 0x51c1d8ca, 0xbfc33274, 0x1a83815c, 0x49512c1b, 0x4d262ce3, 0xab2674eb, 0xa84710f8, 0x96c698e5, 0xd045f84b, 0xc2ff8580, 0xf7a42aa9, 0x6eee74eb, 0xfde04cd3, 0xaa18c506, 0xcb3c95dc, 0x4761f04c, 0x01a75b02, 0x395cb0ce, 0x6269b03c, 0x7a4976a5, 0x982a10f3, 0x42cf92cd, 0x8c110454, 0x055408da, 0x0ada639f, 0xf3d74ec6, 0x3525b6b4, 0x21d1437d, 0x5ec84bc1, 0x5a8256e9, 0xe03bc870, 0xd4ec26ad, 0xff77bf3b, 0xf1af0c32, 0x8d8ed6df, 0x02fa5021, 0x7d04282b, 0x9d16455e, 0x5656ed1c, 0x86015646, 0x4e911190, 0x97108eb4, 0xb95d284c, 0x9d379b17, 0xe7ea9203, 0x57092245, 0xb821ff82, 0xefcef176, 0x4c7885d5, 0x36a3990d, 0x4949c392, 0x3a37d261, 0xe3542623, 0xb8f04501, 0x49554eaa, 0x5784625e, 0xc5fdb9d5, 0x089b3dc0, 0xf4741645, 0x59c2b88b, 0x044c5ecf, 0xb5b895f4, 0x5e643350, 0x59fd5974, 0x18af0a5e, 0x1690a41e, 0x7afd7df8, 0xce194b1d, 0x7348105f, 0x55bdac13, 0x992f1f30, 0x7576c408, 0xd5986ca9, 0xfb5db4bb, 0xdd077a3e, 0x1555fd40, 0x8f43e64c, 0x0891c0b2, 0xc034b002, 0xa178edf1, 0x73c6aa69, 0x43768cbf, 0x50518a88, 0x5cea403e, 0x66621f52, 0x6454ada9, 0xfc5353d7, 0xb6367c33, 0x36083ea3, 0x7c6370d7, 0x871ce0df, 0x996a9db6, 0xfc85b23e, 0x4832a696, 0xd9e633e3, 0x8d57a607, 0xb065c6f6, 0x6b14e612, 0x640a3223, 0x7d40f089, 0x00e856a5, 0x02b89be7, 0xbb17308d, 0xf495ca2e, 0x36d73234, 0xe6efc3f6, 
0x3e34538a, 0x50cae00d, 0xc659ceea, 0x8f9c4fd0, 0xffd2a14b, 0x8f3c0302, 0xad6bf0e2, 0x7da1cdf3, 0x587bb565, 0xf5a45c06, 0x7b58dfac, 0x8d5689a9, 0xdd8193b7, 0x0fea45f3, 0x849600fa, 0x71d1f8ee, 0xd9cd7337, 0x10ed0d73, 0xbfb107e3, 0xae79885f, 0xbc424999, 0xefa0a006, 0x66fcd210, 0xb8221c2c, 0x4e4fcb90, 0xbc19bd01, 0x13a6165e, 0xb2456d5c, 0x9de9927f, 0x5cad9384, 0xc5b8aca8, 0x709eaed3, 0x025d5327, 0x3334f98d, 0xc8775813, 0xa7e3a7b5, 0x4900d83c, 0xdb3e88aa, 0x798cf72d, 0xc5201d9c, 0xd301c8a5, 0x24aba004, 0x7fb00d06, 0x96b07431, 0x07e817d2, 0x5dc5e25c, 0xe3565527, 0x4e1527e2, 0x9b19ac07, 0xb97a792b, 0x810e2f87, 0xb7e67428, 0xbdc1ad62, 0x4d68b464, 0x86743e97, 0x5ac3a913, 0x74a330bc, 0xedbd0163, 0x17c1ca06, 0x3762d54b, 0x67cac477, 0x57fb1db4, 0xca5c3afa, 0xe9cdec2a, 0x3f517285, 0x6c123489, 0x8c9c347f, 0xdc352535, 0xfd72d8b0, 0x615adae6, 0xd2149734, 0xb4d087d2, 0x50777523, 0x152794d7, 0xa1de388e, 0xacbfb4a7, 0xcc792a3f, 0x0c9beff1, 0x4518932e, 0xbb10562a, 0x0231bbab, 0x34d6b5e4, 0xe56b9d51, 0x1179cf3a, 0x7c9cca04, 0x7e6804dd, 0x43749e37, 0x474abbed, 0xeca9da66, 0xdbc9f38a, 0xbd90f4f2, 0xca3e3208, 0xf6804e7b, 0x6c2f368b, 0x71865b53, 0x477e86cf, 0xf9556567, 0x99f1268d, 0x5b0d3742, 0x5c7759e9, 0x329a93f4, 0x6edb40ef, 0xd8238cf4, 0x14c6653d, 0xda60ef87, 0x845f263d, 0x5ecb0b8a, 0xc0318caa, 0xb6e9c384, 0xb375f88e, 0x8bbc46cc, 0x9cf69f27, 0x5ad28fa8, 0x458c2af7, 0xc848d9c1, 0xa8fc2598, 0xef9d6e05, 0x72e2b656, 0x548e9bd8, 0x57c6446f, 0x1ed6fa9f, 0xee2aabba, 0xa0ad6aeb, 0x9cf39373, 0xe1ef4e1e, 0x84aaf630, 0x51839901, 0x1fd11926, 0x7e8d187e, 0x3f0c9a19, 0xd15b2dd2, 0x87bcba56, 0x4e4abd25, 0xfe14d638, 0x632c77f2, 0xdd2fb790, 0xa7172b76, 0x51b3b07e, 0xbc087c39, 0x08e4bfcd, 0x835236c7, 0x2f55bb11, 0x01de9ed4, 0x3dfc98ce, 0x28776e03, 0x98fb6143, 0x3b05e401, 0x0999f936, 0x673cda1e, 0x9075e503, 0x80dbd8a1, 0x1d113be7, 0x368624b0, 0xd06b7118, 0xdc0378f6, 0x0c6aee59, 0x3c77c121, 0xb75108d5, 0xa1d6c1a7, 0xdacd595b, 0x71d9b0e5, 0x4afe1c59, 0x4a2cd1e8, 0x8c3eda88, 0x97ae95c2, 0x6bfacf26, 
0x47182448, 0xdbd882ef, 0xae660019, 0xb0986ca0, 0xe8daf1be, 0xb18b34dd, 0x6ba59afa, 0x560c57ea, 0xf6be34ba, 0xcd89f600, 0x6403fcfe, 0x5e429947, 0x7d4886e1, 0x8018e754, 0xc17251e7, 0x36eab7c6, 0x9067ad78, 0xe95f1fe6, 0x44961501, 0x8619621c, 0xdb13e77d, 0xd5d6124d, 0xd36f8ac0, 0x9395356c, 0xf713d1e1, 0x2b3c9d15, 0x4dfb98f9, 0xb553ff7f, 0xd1675da5, 0x382dcfdb, 0x659ed198, 0xa0bfe7d7, 0x0c1fe7f7, 0x6ae7194d, 0x1966c9fe, 0x369f1f79, 0x1137c2c2, 0xf7a06345, 0xfe3f544b, 0xac35a2f4, 0xd7aea537, 0x9b37ff3b, 0xfc395a7b, 0xf3c2710a, 0xf7ec7804, 0x5f5820ca, 0x72b2a99c, 0x6162f1ca, 0x9f4288a2, 0xa888ed1e, 0x02208839, 0xea56c569, 0xace682bc, 0x95096878, 0xf33986ce, 0xbc3ab34e, 0x852fb06b, 0x4d809b7b, 0x3475e9c8, 0xe947baae, 0x18535080, 0x205c85fa, 0x5792f851, 0xe029ec48, 0xd4403f27, 0x587471da, 0x3bd97278, 0x91f1a328, 0x65ea5d8a, 0xc0cfbf0f, 0x135abf90, 0x62843a32, 0xdf6a7aa1, 0x79dc7616, 0xd091c454, 0x355e2d4a, 0xa54e04a6, 0x25719823, 0x41bfa322, 0x94ec342e, 0xbcd06558, 0x52775497, 0xdcd0c726, 0x8c8f3975, 0xbc31513b, 0xb9b3acec, 0x33bbeff5, 0xa3432872, 0xd8dd265e, 0xc1d9f64e, 0xe016c95d, 0x840b9c5e, 0xecdf0d3b, 0x9335eac7, 0xe319c342, 0xaf8a83ca, 0xc2f11b65, 0x8d40c919, 0x3b91cd82, 0x70aad694, 0x341ff4ee, 0xb3fdc758, 0x86e8e96c, 0x239e0308, 0x7daaf786, 0x6d9c3e2e, 0x028237e0, 0x652b0a79, 0x0f203491, 0xccb40e73, 0xb1954681, 0x7ab03780, 0x9cd9ef50, 0x43e720b7, 0xe7de746d, 0xade36a14, 0xbb4f7503, 0x21ef55aa, 0x45a0e8e3, 0x1156ea33, 0x1091d26b, 0xc8b9a8d0, 0x722df639, 0x92977f76, 0x8cb11fe6, 0x0aaeedc7, 0xb1096093, 0xe45ea74f, 0xc2b54ff5, 0x190e549f, 0x222192cb, 0x695f9d7e, 0x926d466a, 0x0dc294aa, 0x7e16a1b5, 0xa4bd2267, 0x19ee878a, 0xc5b8c83a, 0x001b6546, 0x6fbf7edf, 0xb3708024, 0x8e98402d, 0x86af015f, 0x38dfd5b8, 0xc50808ef, 0x29a71185, 0x85f233d5, 0x9dea5939, 0xce3cf02c, 0x45d68b76, 0x745a85b0, 0x6fe38075, 0x52e01b99, 0xc4693697, 0x2cd0bf67, 0x3edecb8f, 0xeb76f624, 0xe3eb94f4, 0xf587d9e5, 0x813543ea, 0x93dfaced, 0x018c23ad, 0xb6781fd7, 0xbd564fc3, 0x962da3f0, 0x389ab6b1, 
0x5866fd23, 0x1f8b3979, 0xc10386bc, 0x4ef64f11, 0xb4572417, 0xa2f65667, 0xd68b6523, 0xa5512c00, 0x2d5f663e, 0x3bf700ff, 0x09ef3396, 0x451bcb0d, 0xfee39ccd, 0xf606c443, 0xb46ffa45, 0x85bcfa16, 0x7fc6efa9, 0x701ec2fe, 0xde98a301, 0xd9980668, 0xc5d004ae, 0xd03dbbb3, 0xb1d795c2, 0x2ab6203e, 0xfa237b58, 0xbf425321, 0x000e019a, 0x2547dbbf, 0xfb97b8ed, 0x08b09edd, 0x42cd7b68, 0x32198a7f, 0x87a2a72d, 0xe0a6a1aa, 0x2866775c, 0xc66e7ac5, 0x9edc41d3, 0x983a6ec5, 0xd3e9793a, 0xa53ad299, 0x38d774ce, 0x75ce7fbd, 0xb353f86e, 0xc37bc25e, 0x5f2b5532, 0x1975cde4, 0xc8294de5, 0x47fbc7c1, 0xb76b2789, 0xea88c920, 0x6c2145ba, 0x4c2211f2, 0x16a0e6de, 0xa2915469, 0xea2b14e2, 0x35202f43, 0xaa9c69de, 0xbd00bd0e, 0xbea9e3f0, 0xbfb0bb47, 0x74347ce4, 0x1bc15e8f, 0xd9f70c10, 0x968f1e31, 0xc55fa605, 0x47af7015, 0xe772ade0, 0x28a5c782, 0x0955a18f, 0x1054e43d, 0x5d7702b1, 0xd54a0e22, 0x82f0f8be, 0x787b2bce, 0x243ce7b8, 0xaa160942, 0x5e7477e9, 0x45cd0df6, 0x5bdca3d7, 0x6b992304, 0x4a8d3515, 0xfe356a57, 0xb011fc33, 0x0e2624c6, 0x0ba4ae36, 0x029fecca, 0x01ac36cc, 0x661010ac, 0x7a8a378a, 0xf499b342, 0x973256e2, 0x1229865d, 0x03411f03, 0x7d925789, 0x6399fa28, 0x1859ddb8, 0x83becb6d, 0x3b8322f5, 0xd0d5f97a, 0xc0fcca51, 0x6be07c35, 0x6072bdea, 0xc09b95e6, 0xa40826f8, 0x1fb79832, 0xc401e83d, 0xcff57a0c, 0x834ee1c2, 0x59ab9f78, 0xad6e094a, 0xad2218bb, 0x3bbf864c, 0xbb62b997, 0x23eed3ff, 0xd033de59, 0x031f0ec4, 0xbcfabf99, 0xac63ae10, 0xe4dc4aec, 0x5b98d623, 0x0dad0246, 0xb5884cbb, 0xa39db5c7, 0x3c243f66, 0x5e69dbfb, 0x3384971d, 0x9145a99e, 0x87570b15, 0xd6821205, 0x34fa05cf, 0xff4ac046, 0x8a98f678, 0x72add320, 0x910a51a5, 0xe78b8b93, 0xb28cd243, 0x6a752e76, 0x16b4e6a9, 0x60b5e403, 0xc5c51f70, 0xbee86c57, 0x75a122c3, 0x3b7e773d, 0xfd8ab8ec, 0x72839672, 0x6a713aa4, 0x18fd1c1d, 0x2ae1e7db, 0xa77453d7, 0x01e7e6c4, 0x31b08d49, 0x636c7119, 0x736028e8, 0x75a31941, 0xcf080b2c, 0x4a92a8fb, 0x84f6b87a, 0x4f97e0dc, 0x8a7b11d2, 0x1ce7f369, 0x056f3a69, 0x40393f83, 0xffc98a61, 0x80daf387, 0xc6a757b1, 0xa95790e2, 
0x1c76cf02, 0xa1450bba, 0x3a3150e5, 0x378e9844, 0x7c47420d, 0x617d2066, 0x8cbd025e, 0x252260a0, 0xd7ded568, 0x8e5400d7 };

/*
 * Core "encryption": a keyed additive checksum over username and password.
 * Each character selects an entry of AdRandomNumbers; all selected entries
 * are summed, plus one final entry selected by the combined lengths.
 * NOTE(review): the & 0x7ff mask assumes the table holds at least 2048
 * entries -- confirm against the table declaration above this chunk.
 */
unsigned int AdEncryptPassword(const char* username, const char* password)
{
	unsigned int userlength;
	unsigned int passlength;
	unsigned int a = 0;
	int i;

	/* Mix in every username character, keyed by its position. */
	for (i = 0; username[i] != 0; i++) {
		a += AdRandomNumbers[(i + username[i]) & 0x7ff];
	}
	userlength = i;
	/* Mix in every password character, offset by the username length. */
	for (i = 0; password[i] != 0; i++) {
		a += AdRandomNumbers[(i + password[i] + userlength) & 0x7ff];
	}
	passlength = i;
	/* Final table lookup keyed by the total length. */
	return AdRandomNumbers[(userlength + passlength) & 0x7ff] + a;
}

/* Format setup: scale the per-crypt key buffers by the OpenMP thread count. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_key = mem_calloc_align(sizeof(*crypt_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
}

/*
 * Accept ciphertexts of the shape FORMAT_TAG<salt>$<hex>, where <hex> is
 * exactly BINARY_SIZE*2 hex digits and the salt part is non-empty and
 * shorter than SALT_SIZE.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q, *tmpstr;
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	tmpstr = strdup(ciphertext); /* work on a copy; a NUL is poked below */
	q = p = &tmpstr[TAG_LENGTH];
	p[-1] = 0;
	p = strrchr(p, '$');
	if (!p)
		goto Err;
	p += 1;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		goto Err;
	if ((p - q) >= SALT_SIZE || p <= q)
		goto Err;
	MEM_FREE(tmpstr);
	return 1;
Err:;
	MEM_FREE(tmpstr);
	return 0;
}

/* Salt is the text between the tag and the last '$' (returned in a static
   buffer, as is conventional for these callbacks). */
static void *get_salt(char *ciphertext)
{
	static char salt[SALT_SIZE + 1];
	char *p, *q;

	memset(salt, 0, SALT_SIZE);
	p = ciphertext + TAG_LENGTH;
	q = strrchr(p, '$');
	memcpy(salt, p, q - p);
	return salt;
}

/* Binary is the trailing hex field parsed into a single unsigned int. */
static void* get_binary(char *ciphertext)
{
	char *p;
	unsigned int* out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = strrchr(ciphertext, '$') + 1;
	*out = (unsigned int)strtoul(p, NULL, 16);
	return out;
}

/* djb2-style hash (xor variant) of the salt string, clipped to the
   salt hash table size. Stops early at a NUL within SALT_SIZE. */
static int salt_hash(void *salt)
{
	unsigned char *s = salt;
	unsigned int hash = 5381;
	unsigned int len =
	    SALT_SIZE;

	while (len-- && *s)
		hash = ((hash << 5) + hash) ^ *s++;
	return hash & (SALT_HASH_SIZE - 1);
}

static void set_salt(void *salt)
{
	memcpy(saved_salt, salt, SALT_SIZE);
}

/* NOTE(review): strncpy does not NUL-terminate when key fills the buffer;
   confirm saved_key[0] is sized with room for the terminator. */
static void cq_set_key(char *key, int index)
{
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Hash every queued key; saved_salt is passed as the "username" argument
   of AdEncryptPassword. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index++)
#endif
	{
		*crypt_key[index] = AdEncryptPassword(saved_salt, saved_key[index]);
	}
	return count;
}

/* Return nonzero if any computed hash matches the candidate binary. */
static int cmp_all(void *binary, int count)
{
	int i = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (i = 0; i < count; ++i)
#endif
	{
		if ((*(unsigned int*)binary) == *(unsigned int*)crypt_key[i])
			return 1;
	}
	return 0;
}

static int cmp_one(void *binary, int index)
{
	if ((*(unsigned int*) binary) == *(unsigned int*) crypt_key[index])
		return 1;
	return 0;
}

/* Nothing further to verify; collisions are expected (FMT_NOT_EXACT). */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }

/* John the Ripper format descriptor wiring the callbacks above. */
struct fmt_main fmt_cq = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		cq_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		cq_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin guard -- matching #if is above this chunk */
model_2cxm_mex.c
/*==========================================================
 * model_2cxm_mex.c - STARDCE toolbox
 *
 * Implements the DCE 2-compartment exchange model
 *
 * The calling syntax is:
 *
 *   C = model_2cxm_mex(time, VIF, vp, ve, kt, fp );
 *
 * Compilation:
 *
 *   mex -R2018a model_2cxm_mex.c
 * or:
 *   1) uncomment the compiler directive MATLAB2015
 *   2) mex COPTIMFLAGS="\$COPTIMFLAGS -std=c99" model_2cxm_mex.c
 *
 * Yannick 2020
 * Copied from GPUfit by Sam Barnes
 *
 *========================================================*/

#include "mex.h"
#include <math.h>
#ifdef __GNU__ /* NOTE(review): __GNUC__ (or _OPENMP) is probably intended -- confirm */
#include <omp.h>
#endif

#ifndef MAXCORES
#define MAXCORES 1
#endif

// #define MATLAB2015

/*
 * Evaluate the 2CXM tissue concentration at one time point
 * (single-precision data).
 *
 * vp, ve      : plasma / EES volume fractions
 * kt          : Ktrans
 * fp          : plasma flow Fp
 * point_index : index of the time point to evaluate the model at
 * T           : time point vector
 * Cp          : vascular input function (VIF)
 *
 * Returns Fp * (Cp convolved with the bi-exponential 2CXM impulse
 * response), integrated by the trapezoidal rule over T[0..point_index].
 */
float dce_2cxm_value_float (
    float vp,
    float ve,
    float kt,
    float fp,
    int const point_index,
    float const * T,
    float const * Cp
)
{
    /* PS derived from Ktrans and Fp; clamped to a huge value when
       kt >= fp would make the denominator zero or negative. */
    float PS;
    if (kt >= fp) {
        PS = 10e8;
    }
    else {
        PS = fp / ((fp / kt) - 1);
    }

    float convFunc = 0;
    float Tp = vp / (PS + fp);
    float Te = ve / PS;
    float Tb = vp / fp;
    /* Eigen-rates of the two-compartment system and the weighting term. */
    float Kpos = 0.5 * (1/Tp + 1/Te + sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
    float Kneg = 0.5 * (1/Tp + 1/Te - sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
    float Eneg = (Kpos - 1/Tb) / (Kpos - Kneg);

    /* Trapezoidal convolution of the VIF with the impulse response.
       NOTE(review): the lone exp(-Kpos) term (no time argument) differs
       from the usual kernel exp(-(t - tau)*Kpos); kept as copied from
       GPUfit -- confirm against the reference implementation. */
    for (int i = 1; i <= point_index; i++) {
        float spacing = T[i] - T[i - 1];
        float Ct = Cp[i] * (exp(-(T[point_index] - T[i]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i]) * Kneg) - exp(-Kpos)));
        float Ctprev = Cp[i - 1] * (exp(-(T[point_index] - T[i-1]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i-1]) * Kneg) - exp(-Kpos)));
        convFunc += ((Ct + Ctprev) / 2 * spacing);
    }

    float function_value = fp * convFunc;
    return function_value;
}

/*
 * Double-precision twin of dce_2cxm_value_float; see above for the
 * parameter description.
 *
 * Fix: the return type was float, silently truncating the result of the
 * double-precision path; it now returns double.
 */
double dce_2cxm_value_double (
    double vp,
    double ve,
    double kt,
    double fp,
    int const point_index,
    double const * T,
    double const * Cp
)
{
    /* PS derived from Ktrans and Fp; clamped when kt >= fp. */
    double PS;
    if (kt >= fp) {
        PS = 10e8;
    }
    else {
        PS = fp / ((fp / kt) - 1);
    }

    double convFunc = 0;
    double Tp = vp / (PS + fp);
    double Te = ve / PS;
    double Tb = vp / fp;
    double Kpos = 0.5 * (1/Tp + 1/Te + sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
    double Kneg = 0.5 * (1/Tp + 1/Te - sqrt(pow(1/Tp + 1/Te,2) - 4 * 1/Te * 1/Tb));
    double Eneg = (Kpos - 1/Tb) / (Kpos - Kneg);

    /* Trapezoidal convolution (same review note as the float version). */
    for (int i = 1; i <= point_index; i++) {
        double spacing = T[i] - T[i - 1];
        double Ct = Cp[i] * (exp(-(T[point_index] - T[i]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i]) * Kneg) - exp(-Kpos)));
        double Ctprev = Cp[i - 1] * (exp(-(T[point_index] - T[i-1]) * Kpos) + Eneg * (exp(-(T[point_index] - T[i-1]) * Kneg) - exp(-Kpos)));
        convFunc += ((Ct + Ctprev) / 2 * spacing);
    }

    double function_value = fp * convFunc;
    return function_value;
}

/* The gateway function: dispatches to the double or single kernel based
 * on the class of the VIF input (prhs[1]); all six inputs are assumed to
 * share that class -- TODO confirm callers guarantee this. */
void mexFunction( int nlhs, mxArray *plhs[],
                  int nrhs, const mxArray *prhs[])
{
    size_t N, Nt;
    mxClassID precision;
    float *Cpf, *timef, *vpf, *vef, *ktf, *fpf, *Cf;
    double *Cpd, *timed, *vpd, *ved, *ktd, *fpd, *Cd;

    /* check for proper number of arguments */
    if (nrhs != 6) {
        mexErrMsgIdAndTxt("STARDCE:model_standard:nrhs","Six inputs required: time, VIF, vp, ve, kt, fp.");
    }
    if (nlhs != 1) {
        mexErrMsgIdAndTxt("STARDCE:model_standard:nlhs","One output required.");
    }

    /* Number of time points (from time) and of voxels (from vp). */
    Nt = mxGetN(prhs[0]);
    N = mxGetN(prhs[2]);
    if (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) {
        precision = mxDOUBLE_CLASS;
#ifdef MATLAB2015 // try to be backward compatible
        timed = mxGetPr(prhs[0]);
        Cpd = mxGetPr(prhs[1]);
        vpd = mxGetPr(prhs[2]);
        ved = mxGetPr(prhs[3]);
        ktd = mxGetPr(prhs[4]);
        fpd = mxGetPr(prhs[5]);
#else
        timed = mxGetDoubles(prhs[0]);
        Cpd = mxGetDoubles(prhs[1]);
        vpd = mxGetDoubles(prhs[2]);
        ved = mxGetDoubles(prhs[3]);
        ktd = mxGetDoubles(prhs[4]);
        fpd = mxGetDoubles(prhs[5]);
#endif
    }
    else {
        precision = mxSINGLE_CLASS;
#ifdef MATLAB2015 // try to be backward compatible
        timef = mxGetData(prhs[0]);
        Cpf = mxGetData(prhs[1]);
        vpf = mxGetData(prhs[2]);
        vef = mxGetData(prhs[3]);
        ktf = mxGetData(prhs[4]);
        fpf = mxGetData(prhs[5]);
#else
        timef = mxGetSingles(prhs[0]);
        Cpf = mxGetSingles(prhs[1]);
        vpf = mxGetSingles(prhs[2]);
        vef = mxGetSingles(prhs[3]);
        ktf = mxGetSingles(prhs[4]);
        fpf = mxGetSingles(prhs[5]);
#endif
    }

    /* output concentration, N voxels x Nt time points */
    plhs[0] = mxCreateNumericMatrix(N, Nt, precision, mxREAL);

#ifdef __GNU__
    /* Set number of threads */
    omp_set_num_threads(MAXCORES);
#endif

    if (precision == mxDOUBLE_CLASS) {
#ifdef MATLAB2015
        Cd = mxGetPr(plhs[0]);
#else
        Cd = mxGetDoubles(plhs[0]);
#endif
        /* Loop variables are declared inside the for statements, so they
           are implicitly private; the former private(n,t) clause named
           out-of-scope identifiers and broke OpenMP builds. */
        #pragma omp parallel for
        for (int n = 0; n < N; n++) {
            for (int t = 0; t < Nt; t++) {
                /* Fix: was vef[n] -- the *float* ve pointer, which is
                   uninitialized on this branch (UB); must read ved[n]. */
                Cd[t * N + n] = dce_2cxm_value_double(vpd[n], ved[n], ktd[n], fpd[n], t, timed, Cpd);
            }
        }
    }
    else {
#ifdef MATLAB2015
        Cf = mxGetData(plhs[0]);
#else
        Cf = mxGetSingles(plhs[0]);
#endif
        #pragma omp parallel for
        for (int n = 0; n < N; n++) {
            for (int t = 0; t < Nt; t++) {
                Cf[t * N + n] = dce_2cxm_value_float(vpf[n], vef[n], ktf[n], fpf[n], t, timef, Cpf);
            }
        }
    }
}
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) { for (t4=max(max(ceild(t1-30,32),ceild(8*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(floord(4*Nt+Nx-9,128),floord(4*t1+Nx-1,128)),floord(8*t3+Nx-5,128));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),32*t4+30);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) 
+ A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
/* ==================== lis_vector_opv.c ==================== */
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" /************************************************ * lis_vector_axpy y <- y + alpha * x * lis_vector_xpay y <- x + alpha * y * lis_vector_axpyz z <- y + alpha * x * lis_vector_copy y <- x * lis_vector_scale y <- alpha * x * lis_vector_pmul z_i <- x_i * y_i * lis_vector_pdiv z_i <- x_i / y_i * lis_vector_set_all x_i <- alpha * lis_vector_abs x_i <- |x_i| * lis_vector_reciprocal x_i <- 1 / x_i * lis_vector_shift x_i <- alpha + x_i ************************************************/ /**********************/ /* y <- y + alpha * x */ /**********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_axpy" LIS_INT lis_vector_axpy(LIS_SCALAR alpha, LIS_VECTOR vx, LIS_VECTOR vy) { LIS_INT i,n; LIS_SCALAR *x,*y; LIS_DEBUG_FUNC_IN; n = vx->n; #ifndef NO_ERROR_CHECK if( n!=vy->n ) { LIS_SETERR(LIS_ERR_ILL_ARG,"length of vector x and y is not equal\n"); return LIS_ERR_ILL_ARG; } #endif x = vx->value; y = vy->value; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { y[i] += alpha * x[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /**********************/ /* y <- x + alpha * y */ /**********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_xpay" LIS_INT lis_vector_xpay(LIS_VECTOR vx, LIS_SCALAR alpha, LIS_VECTOR vy) { LIS_INT i,n; LIS_SCALAR *x,*y; LIS_DEBUG_FUNC_IN; n = vx->n; #ifndef NO_ERROR_CHECK if( n!=vy->n ) { LIS_SETERR(LIS_ERR_ILL_ARG,"length of vector x and y is not equal\n"); return LIS_ERR_ILL_ARG; } #endif x = vx->value; y = vy->value; #ifdef _OPENMP #pragma omp parallel for private(i) #endif #ifdef USE_VEC_COMP #pragma cdir nodep #endif for(i=0; i<n; i++) { y[i] = x[i] + alpha * y[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } 
/**********************/ /* z <- y + alpha * x */ /**********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_axpyz" LIS_INT lis_vector_axpyz(LIS_SCALAR alpha, LIS_VECTOR vx, LIS_VECTOR vy, LIS_VECTOR vz) { LIS_INT i,n; LIS_SCALAR *x,*y,*z; LIS_DEBUG_FUNC_IN; n = vx->n; #ifndef NO_ERROR_CHECK if( n!=vy->n || n!=vz->n ) { LIS_SETERR(LIS_ERR_ILL_ARG,"length of vector x and y and z is not equal\n"); return LIS_ERR_ILL_ARG; } #endif x = vx->value; y = vy->value; z = vz->value; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { z[i] = alpha * x[i] + y[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /********************/ /* y <- x */ /********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_copy" LIS_INT lis_vector_copy(LIS_VECTOR vx, LIS_VECTOR vy) { LIS_INT i,n; LIS_SCALAR *x,*y; LIS_DEBUG_FUNC_IN; n = vx->n; #ifndef NO_ERROR_CHECK if( n!=vy->n ) { LIS_SETERR(LIS_ERR_ILL_ARG,"length of vector x and y is not equal\n"); return LIS_ERR_ILL_ARG; } #endif x = vx->value; y = vy->value; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { y[i] = x[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /********************/ /* y <- alpha * x */ /********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_scale" LIS_INT lis_vector_scale(LIS_SCALAR alpha, LIS_VECTOR vx) { LIS_INT i,n; LIS_SCALAR *x; LIS_DEBUG_FUNC_IN; n = vx->n; x = vx->value; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { x[i] = alpha * x[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /********************/ /* x_i <- alpha */ /********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_set_all" LIS_INT lis_vector_set_all(LIS_SCALAR alpha, LIS_VECTOR vx) { LIS_INT i,n; LIS_SCALAR *x; LIS_DEBUG_FUNC_IN; n = vx->n; x = vx->value; #ifdef USE_VEC_COMP #pragma cdir 
nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { x[i] = alpha; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /********************/ /* z_i <- x_i * y_i */ /********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_pmul" LIS_INT lis_vector_pmul(LIS_VECTOR vx,LIS_VECTOR vy,LIS_VECTOR vz) { LIS_INT i,n; LIS_SCALAR *x,*y,*z; LIS_DEBUG_FUNC_IN; n = vx->n; #ifndef NO_ERROR_CHECK if( n!=vy->n || n!=vz->n ) { LIS_SETERR(LIS_ERR_ILL_ARG,"length of vector x and y and z is not equal\n"); return LIS_ERR_ILL_ARG; } #endif x = vx->value; y = vy->value; z = vz->value; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { z[i] = x[i] * y[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /********************/ /* z_i <- x_i / y_i */ /********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_pdiv" LIS_INT lis_vector_pdiv(LIS_VECTOR vx,LIS_VECTOR vy,LIS_VECTOR vz) { LIS_INT i,n; LIS_SCALAR *x,*y,*z; LIS_DEBUG_FUNC_IN; n = vx->n; #ifndef NO_ERROR_CHECK if( n!=vy->n || n!=vz->n ) { LIS_SETERR(LIS_ERR_ILL_ARG,"length of vector x and y and z is not equal\n"); return LIS_ERR_ILL_ARG; } #endif x = vx->value; y = vy->value; z = vz->value; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { z[i] = x[i] / y[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /********************/ /* x_i <- |x_i| */ /********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_abs" LIS_INT lis_vector_abs(LIS_VECTOR vx) { LIS_INT i,n; LIS_SCALAR *x; LIS_DEBUG_FUNC_IN; x = vx->value; n = vx->n; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { x[i] = fabs(x[i]); } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /********************/ /* x_i <- 1 / x_i */ /********************/ #undef __FUNC__ #define __FUNC__ "lis_vector_reciprocal" LIS_INT 
lis_vector_reciprocal(LIS_VECTOR vx) { LIS_INT i,n; LIS_SCALAR *x; LIS_DEBUG_FUNC_IN; x = vx->value; n = vx->n; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { x[i] = 1.0 / x[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /************************/ /* x_i <- alpha + x_i */ /************************/ #undef __FUNC__ #define __FUNC__ "lis_vector_shift" LIS_INT lis_vector_shift(LIS_SCALAR alpha, LIS_VECTOR vx) { LIS_INT i,n; LIS_SCALAR *x; LIS_DEBUG_FUNC_IN; x = vx->value; n = vx->n; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { x[i] = alpha + x[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /*******************************/ /* q <- x - (x,y)/||x,x|| * y */ /*******************************/ /* #undef __FUNC__ #define __FUNC__ "lis_vector_cgs" LIS_INT lis_vector_cgs(LIS_SCALAR alpha, LIS_VECTOR vx) { LIS_INT i,n; LIS_SCALAR *x; LIS_DEBUG_FUNC_IN; x = vx->value; n = vx->n; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { x[i] = alpha + x[i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } */ /*************************************/ /* QR <- X by Classical Gram-Schmidt */ /*************************************/ #undef __FUNC__ #define __FUNC__ "lis_vector_cgs" LIS_INT lis_vector_cgs(LIS_INT n, LIS_VECTOR *x, LIS_VECTOR *q, LIS_VECTOR *r) { LIS_INT i, j, k; LIS_VECTOR x_k; LIS_SCALAR nrm2; LIS_REAL tol; lis_vector_duplicate(x[0], &x_k); tol = 1e-6; for (k=0;k<n;k++) { lis_vector_set_all(0.0,q[k]); lis_vector_set_all(0.0,r[k]); } for (k=0;k<n;k++) { lis_vector_copy(x[k],x_k); for (j=0;j<k;j++) { r[k]->value[j] = 0; for (i=0;i<n;i++) { r[k]->value[j] += q[j]->value[i] * x[k]->value[i]; } for (i=0;i<n;i++) { x_k->value[i] += q[j]->value[i] * x[k]->value[i]; } } lis_vector_nrm2(x_k, &nrm2); if (nrm2<tol) break; for (i=0;i<n;i++) { q[k]->value[i] = 
x_k->value[i] / nrm2; } } lis_vector_destroy(x_k); return 0; } /* #undef __FUNC__ #define __FUNC__ "lis_array_mgs" LIS_INT lis_array_mgs(LIS_INT n, LIS_SCALAR *x, LIS_SCALAR *q, LIS_SCALAR *r) { LIS_INT i, j, k; LIS_SCALAR *x_j, nrm2; LIS_REAL tol; tol = 1e-12; x_j = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR), "lis_array_mgs::x_j"); for (j=0;j<n;j++) { for (i=0;i<n;i++) { x_j[i] = x[i*n+j]; } lis_array_nrm2(n, &x_j[0], &nrm2); r[j*n+j] = nrm2; for (i=0;i<n;i++) { if (nrm2<tol) break; q[i*n+j] = x_j[i] / nrm2; } for (k=j+1;k<n;k++) { r[j*n+k] = 0; for (i=0;i<n;i++) { r[j*n+k] = r[j*n+k] + q[i*n+j] * x[i*n+k]; } for (i=0;i<n;i++) { x[i*n+k] = x[i*n+k] - r[j*n+k] * q[i*n+j]; } } } lis_free(x_j); return 0; } */
/* ==================== GB_binop__minus_uint16.c ==================== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__minus_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__minus_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint16) // A*D function (colscale): GB (_AxD__minus_uint16) // D*A function (rowscale): GB (_DxB__minus_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint16) // C=scalar+B GB (_bind1st__minus_uint16) // C=scalar+B' GB (_bind1st_tran__minus_uint16) // C=A+scalar GB (_bind2nd__minus_uint16) // C=A'+scalar GB (_bind2nd_tran__minus_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define 
GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT16 || GxB_NO_MINUS_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__minus_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool 
Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// NOTE(review): this looks like machine-generated GraphBLAS factory-kernel
// code for z = x - y on uint16; presumably generated from a Generator
// template — confirm and edit the generator rather than this file.

GrB_Info GB (_AemultB_04__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // mask matrix (sparse or hypersparse)
    const bool Mask_struct,             // if true, use only the structure of M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,        // parallel task slicing of M
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;             // kernel compiled out via GB_control.h
    #else
    // body is supplied by the shared template
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;    // scalar bound as the FIRST operand
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;    // scalar bound as the SECOND operand
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/opencl-private.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/threshold.h" #ifdef MAGICKCORE_CLPERFMARKER #include "CLPerfMarker.h" #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. 
We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *AdaptiveBlurImageChannel(const Image *image,
%        const ChannelType channel,double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Convenience wrapper: adaptively blur all default channels. */
  blur_image=AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(blur_image);
}

MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
/* Guard against a zero sigma in the Gaussian denominator below. */
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* Near-zero sigma means no blur: return the clone unchanged. */
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only the even slots kernel[0], kernel[2], ... are populated; slot i holds
    a (width-i)x(width-i) Gaussian, so a larger i selects a smaller kernel.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Fold the normalization residue into the center tap so taps sum to 1. */
    kernel[i][(k-1)/2]+=(1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed part-way: release what was acquired. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      register const double
        *magick_restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /* Strong edge intensity selects a large i (small kernel => less blur);
         flat regions select a small i (large kernel => more blur). */
      i=(ssize_t) ceil((double) width*QuantumScale*
        GetPixelIntensity(edge_image,r)-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      /* Only even kernel slots are allocated. */
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=kernel[i];
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /* Renormalize by the alpha-weighted kernel sum. */
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveBlurImageChannel)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e   S h a r p e n   I
m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges.  We sharpen
%  the image with a Gaussian operator of the given radius and standard
%  deviation (sigma).  For reasonable results, radius should be larger than
%  sigma.  Use a radius of 0 and AdaptiveSharpenImage() selects a suitable
%  radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *AdaptiveSharpenImageChannel(const Image *image,
%        const ChannelType channel,double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *sharp_image;

  /* Convenience wrapper: adaptively sharpen all default channels. */
  sharp_image=AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(sharp_image);
}

MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
/* Guard against a zero sigma in the Gaussian denominator below. */
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* Near-zero sigma means no sharpening: return the clone unchanged. */
  if (fabs(sigma) <= MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sharp_image->exception);
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only the even slots kernel[0], kernel[2], ... are populated; slot i holds
    a (width-i)x(width-i) sharpening kernel (negative Gaussian surround with
    a positive center).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Center tap makes the kernel sum to -normalize (net sharpening). */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed part-way: release what was acquired. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict sharp_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      register const double
        *magick_restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /* Inverse of adaptive blur: strong edges select a LARGE kernel
         (more sharpening); flat regions a small one. */
      i=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      /* Only even kernel slots are allocated. */
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      k=kernel[i];
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /* Renormalize by the alpha-weighted kernel sum. */
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveSharpenImageChannel)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r   I m a g e
%
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).  For reasonable results,
%  the radius should be larger than sigma.  Use a radius of 0 and BlurImage()
%  selects a suitable radius for you.
%
%  The format of the BlurImage method is:
%
%      Image *BlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *BlurImageChannel(const Image *image,const ChannelType channel,
%        const double radius,const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Convenience wrapper: blur all default channels. */
  blur_image=BlurImageChannel(image,DefaultChannels,radius,sigma,exception);
  return(blur_image);
}

MagickExport Image *BlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  char
    geometry[MaxTextExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image = NULL;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through to CPU on failure. */
  blur_image=AccelerateBlurImage(image,channel,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /* Separable Gaussian: two 1-D convolve passes, the second rotated +90. */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n v o l v e   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvolveImage() applies a custom convolution kernel to the image.
%
%  The format of the ConvolveImage method is:
%
%      Image *ConvolveImage(const Image *image,const size_t order,
%        const double *kernel,ExceptionInfo *exception)
%      Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
%        const size_t order,const double *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o order: the number of columns and rows in the filter kernel.
%
%    o kernel: An array of double representing the convolution kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *ConvolveImage(const Image *image,const size_t order,
  const double *kernel,ExceptionInfo *exception)
{
  Image
    *convolve_image;

#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  /* Convenience wrapper: convolve all default channels. */
  convolve_image=ConvolveImageChannel(image,DefaultChannels,order,kernel,
    exception);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(convolve_image);
}

MagickExport Image *ConvolveImageChannel(const Image *image,
  const ChannelType channel,const size_t order,const double *kernel,
  ExceptionInfo *exception)
{
  Image
    *convolve_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Build an order x order kernel centered at ((order-1)/2,(order-1)/2). */
  kernel_info->width=order;
  kernel_info->height=order;
  kernel_info->x=(ssize_t) (order-1)/2;
  kernel_info->y=(ssize_t) (order-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (order*order); i++)
    kernel_info->values[i]=kernel[i];
  convolve_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through to CPU on failure. */
  convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
    exception);
#endif
  if (convolve_image == (Image *) NULL)
    convolve_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
      kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(convolve_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e   I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%
edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  One hull pass: compare each pixel of f against its neighbor at
  (x_offset,y_offset); polarity>0 raises dark pixels, polarity<0 lowers
  bright ones.  f and g are (columns+2)x(rows+2) scratch planes with a
  one-pixel border; the intermediate result goes through g back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the one-pixel padded border row. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    SignedQuantum
      v;

    /* Index of the first interior pixel of row y in the padded plane. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second sub-pass: compare against both offset neighbors of g. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    SignedQuantum
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  register ssize_t
    i;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  size_t
    length,
    number_channels;

  /* Hull offsets: vertical, horizontal and the two diagonals. */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through to CPU on failure. */
  despeckle_image=AccelerateDespeckleImage(image, exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Planes carry a one-pixel border on each side.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    /* Skip the opacity plane when the image has no alpha channel. */
    if ((image->matte == MagickFalse) && (i == 3))
      continue;
    (void) memset(pixels,0,length*sizeof(*pixels));
    /* Copy channel i into the padded plane. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixels[j]=GetPixelRed(p); break;
          case 1: pixels[j]=GetPixelGreen(p); break;
          case 2: pixels[j]=GetPixelBlue(p); break;
          case 3: pixels[j]=GetPixelOpacity(p); break;
          case 4: pixels[j]=GetPixelBlack(indexes+x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /* Crimmins: raise dark speckle (+1), then lower bright speckle (-1),
       along each of the four hull directions and its reverse. */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* Copy the despeckled plane back into channel i of the result. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *magick_restrict indexes;

      register PixelPacket
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: SetPixelRed(q,pixels[j]); break;
          case 1: SetPixelGreen(q,pixels[j]); break;
          case 2: SetPixelBlue(q,pixels[j]); break;
          case 3: SetPixelOpacity(q,pixels[j]); break;
          case 4: SetPixelIndex(indexes+x,pixels[j]); break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
% % The format of the EdgeImage method is: % % Image *EdgeImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *EdgeImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *edge_image; KernelInfo *kernel_info; register ssize_t i; size_t width; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,0.5); kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (kernel_info->width-1)/2; kernel_info->y=(ssize_t) (kernel_info->height-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->height*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]=(-1.0); kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0; edge_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info, exception); #endif if (edge_image == (Image *) NULL) edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology, 
1,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E m b o s s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EmbossImage() returns a grayscale image with a three-dimensional effect. % We convolve the image with a Gaussian operator of the given radius and % standard deviation (sigma). For reasonable results, radius should be % larger than sigma. Use a radius of 0 and Emboss() selects a suitable % radius for you. % % The format of the EmbossImage method is: % % Image *EmbossImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  /*
    Validate arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build the emboss kernel: a Gaussian profile, signed negative in the
    upper/left half and positive in the lower/right half.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      /*
        Gaussian weight, negated above/left of center; k tracks the one
        column per row (an anti-diagonal) that keeps a non-zero tap.
      */
      kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 : 8.0)*
        exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the taps sum to (at most) unity.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (emboss_image == (Image *) NULL)
    emboss_image=MorphologyImageChannel(image,DefaultChannels,
      ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImageChannel(emboss_image,(ChannelType) (AllChannels &~
      SyncChannels));
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F i l t e r I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FilterImage() applies a custom convolution kernel to the image.
%
%  The format of the FilterImage method is:
%
%      Image *FilterImage(const Image *image,const KernelInfo *kernel,
%        ExceptionInfo *exception)
%      Image *FilterImageChannel(const Image *image,const ChannelType channel,
%        const KernelInfo *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o kernel: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  FilterImage(): convenience wrapper that filters all default channels.
*/
MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  Image
    *filter_image;

  filter_image=FilterImageChannel(image,DefaultChannels,kernel,exception);
  return(filter_image);
}

MagickExport Image *FilterImageChannel(const Image *image,
  const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception)
{
#define FilterImageTag  "Filter/Image"

  CacheView
    *filter_view,
    *image_view;

  Image
    *filter_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    *filter_kernel;

  register ssize_t
    i;

  ssize_t
    y;

#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  /*
    Initialize filter image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((kernel->width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  if (image->debug != MagickFalse)
    {
      /*
        Dump the kernel values to the transform log, one row per line.
      */
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,
        (double) kernel->height);
      message=AcquireString("");
      k=kernel->values;
      for (v=0; v < (ssize_t) kernel->height; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) kernel->width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated path first; fall through on failure.
  */
  filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception);
  if (filter_image != (Image *) NULL)
    {
#ifdef MAGICKCORE_CLPERFMARKER
      clEndPerfMarkerAMD();
#endif
      return(filter_image);
    }
#endif
  filter_image=CloneImage(image,0,0,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&filter_image->exception);
      filter_image=DestroyImage(filter_image);
      return((Image *) NULL);
    }
  /*
    Normalize kernel: copy the taps into an aligned MagickRealType buffer.
  */
  filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*filter_kernel)));
  if (filter_kernel == (MagickRealType *) NULL)
    {
      filter_image=DestroyImage(filter_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    filter_kernel[i]=(MagickRealType) kernel->values[i];
  /*
    Filter image: one row per iteration; each row reads a kernel-height
    window of virtual pixels centered on it.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  filter_view=AcquireAuthenticCacheView(filter_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,filter_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict filter_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y-
      (ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        pixel;

      register const MagickRealType
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict kernel_pixels;

      register ssize_t
        u;

      ssize_t
        v;

      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=filter_kernel;
      kernel_pixels=p;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: straight weighted sum of RGB taps.
          */
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              pixel.red+=(*k)*kernel_pixels[u].red;
              pixel.green+=(*k)*kernel_pixels[u].green;
              pixel.blue+=(*k)*kernel_pixels[u].blue;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              /* Second pass over the window for the opacity channel. */
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *magick_restrict kernel_indexes;

              k=filter_kernel;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_indexes+=image->columns+kernel->width;
              }
              SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          /*
            Matte path: alpha-weighted sum, renormalized by gamma.
          */
          double
            alpha,
            gamma;

          gamma=0.0;
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                GetPixelOpacity(kernel_pixels+u)));
              pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u);
              pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u);
              pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u);
              gamma+=(*k)*alpha;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          gamma=PerceptibleReciprocal(gamma);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *magick_restrict kernel_indexes;

              k=filter_kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    kernel_pixels[u].opacity));
                  pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
                kernel_indexes+=image->columns+kernel->width;
              }
              SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(filter_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FilterImageChannel)
#endif
        proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  filter_image->type=image->type;
  filter_view=DestroyCacheView(filter_view);
  image_view=DestroyCacheView(image_view);
  filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel);
  if (status == MagickFalse)
    filter_image=DestroyImage(filter_image);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(filter_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *GaussianBlurImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=GaussianBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *GaussianBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { char geometry[MaxTextExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateConvolveImageChannel(image,channel,kernel_info, exception); #endif if (blur_image == (Image *) NULL) blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. 
%
%  The format of the MotionBlurImage method is:
%
%      Image *MotionBlurImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%      Image *MotionBlurImageChannel(const Image *image,const ChannelType channel,
%        const double radius,const double sigma,const double angle,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMotionBlurKernel(): build a normalized 1-D half-Gaussian of `width`
  taps; returns NULL on allocation failure (caller must check).
*/
static double *GetMotionBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    normalize;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double *) NULL)
    return(kernel);
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

/*
  MotionBlurImage(): convenience wrapper over all default channels.
*/
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  Image
    *motion_blur;

  motion_blur=MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle,
    exception);
  return(motion_blur);
}

MagickExport Image *MotionBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the per-tap sample offsets along the motion direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    NOTE(review): on this accelerated success path, `kernel` and `offset`
    are returned without being relinquished -- looks like a memory leak;
    confirm against upstream before changing.
  */
  blur_image=AccelerateMotionBlurImage(image,channel,kernel,width,offset,
    exception);
  if (blur_image != (Image *) NULL)
    return blur_image;
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      PixelPacket
        pixel;

      register const IndexPacket
        *magick_restrict indexes;

      register double
        *magick_restrict k;

      register ssize_t
        i;

      k=kernel;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: weighted sum of samples taken along the motion
            vector (one virtual-pixel fetch per tap).
          */
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            qixel.red+=(*k)*pixel.red;
            qixel.green+=(*k)*pixel.green;
            qixel.blue+=(*k)*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*(*indexes);
              }
            k++;
          }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index));
        }
      else
        {
          /*
            Matte path: alpha-weighted accumulation, renormalized by gamma.
          */
          double
            alpha,
            gamma;

          alpha=0.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
            qixel.red+=(*k)*alpha*pixel.red;
            qixel.green+=(*k)*alpha*pixel.green;
            qixel.blue+=(*k)*alpha*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*alpha*GetPixelIndex(indexes);
              }
            gamma+=(*k)*alpha;
            k++;
          }
          gamma=PerceptibleReciprocal(gamma);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MotionBlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *KuwaharaImageChannel(const Image *image,const ChannelType channel,
%        const double radius,const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  KuwaharaImage(): convenience wrapper over all default channels.
*/
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *kuwahara_image;

  kuwahara_image=KuwaharaImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(kuwahara_image);
}

MagickExport Image *KuwaharaImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
/* NOTE(review): "Kiwahara" looks like a typo for "Kuwahara", but this is a
   runtime progress-tag string -- left unchanged. */
#define KuwaharaImageTag  "Kiwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) channel;
  width=(size_t) radius+1;
  /*
    Work on a Gaussian-smoothed copy; each output pixel is interpolated
    from the quadrant with the smallest luma variance.
  */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&kuwahara_image->exception);
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,kuwahara_image->rows,1)
#endif
  for (y=0; y < (ssize_t) kuwahara_image->rows; y++)
  {
    register IndexPacket
      *magick_restrict kuwahara_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    kuwahara_indexes=GetCacheViewAuthenticIndexQueue(kuwahara_view);
    for (x=0; x < (ssize_t) kuwahara_image->columns; x++)
    {
      double
        min_variance;

      MagickPixelPacket
        pixel;

      RectangleInfo
        quadrant,
        target;

      register ssize_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width*width windows touching (x,y); remember the
        one with the smallest luma variance.
      */
      for (i=0; i < 4; i++)
      {
        const PixelPacket
          *magick_restrict p;

        double
          variance;

        MagickPixelPacket
          mean;

        register const PixelPacket
          *magick_restrict k;

        register ssize_t
          n;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const PixelPacket *) NULL)
          break;
        GetMagickPixelPacket(image,&mean);
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          mean.red+=(double) k->red;
          mean.green+=(double) k->green;
          mean.blue+=(double) k->blue;
          k++;
        }
        mean.red/=(double) (width*width);
        mean.green/=(double) (width*width);
        mean.blue/=(double) (width*width);
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(image,k);
          variance+=(luma-MagickPixelLuma(&mean))*(luma-MagickPixelLuma(&mean));
          k++;
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* Early break above means a pixel fetch failed; abort the row. */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      status=InterpolateMagickPixelPacket(gaussian_image,image_view,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,
        (double) target.y+target.height/2.0,&pixel,exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(kuwahara_image,&pixel,q,kuwahara_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_KuwaharaImage)
#endif
        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&contrast_image->exception);
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    Per-thread scan-line buffer, padded by `width` on each side; radius is
    interpreted as a percentage of the largest dimension (0.2% per unit).
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanLinePixels_info=AcquireVirtualMemory(GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(width+1)*(width+1);
  /*
    Vertical pass: triangular-weighted luma sums per column, written into
    the padded intermediate buffer.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p++;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur the intermediate buffer along rows, then scale
    each source pixel by the unsharp-style contrast multiplier.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
        exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /*
          Apply and write
        */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(q,ClampToQuantum(GetPixelRed(p)*mult));
        SetPixelGreen(q,ClampToQuantum(GetPixelGreen(p)*mult));
        SetPixelBlue(q,ClampToQuantum(GetPixelBlue(p)*mult));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MaxTextExtent],
    label[MaxTextExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Per-tile parameters start here and are stepped after each tile so the
    nine thumbnails show a progression of the chosen operation.
  */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel);
    /* The center tile (index 4) is the unmodified reference image. */
    if (i == (NumberTiles/2))
      {
        (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g",
          degrees,2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImageChannel(preview_image,DefaultChannels,gamma);
        (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse);
        (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        /* Despeckle i times total: i-1 in-place passes, then a final pass. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): case 4 is absent here (3 jumps to 5) and the default
          overwrites thumbnail->magick with "NULL" — verify this matches the
          intended noise-type progression.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MaxTextExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MaxTextExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MaxTextExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MaxTextExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MaxTextExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"poisson",MaxTextExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          NOTE(review): BilevelImage is applied to `thumbnail`, not the clone
          `preview_image` — this matches the historical code but looks
          suspicious; confirm which image should be thresholded.
        */
        (void) BilevelImage(thumbnail,
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"threshold %g",
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*
          percentage/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g",
          degrees,degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=(i-1)/2;
        geometry.y=(i-1)/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold);
        (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g",
          0.5*degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MaxTextExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        /*
          NOTE(review): close(file)-1 folds the close() result into the fd
          variable (historical idiom); file itself is not used afterwards.
        */
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MaxTextExtent,
          "jpeg:%s",filename);
        /* Round-trip through the JPEG coder to visualize quality loss. */
        status=WriteImage(preview_info,preview_image);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MaxTextExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MaxTextExtent,
              "quality %s\n%gkb ",factor,(double)
              ((MagickOffsetType) GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ",
              factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Step the parameters for the next tile. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a rotational blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%    Image *RotationalBlurImage(const Image *image,const double angle,
%      ExceptionInfo *exception)
%    Image *RotationalBlurImageChannel(const Image *image,
%      const ChannelType channel,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o angle: the angle of the rotational blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: rotational blur over the default channel set. */
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  Image
    *blur_image;

  blur_image=RotationalBlurImageChannel(image,DefaultChannels,angle,exception);
  return(blur_image);
}

MagickExport Image *RotationalBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    blur_radius,
    *cos_theta,   /* precomputed cosine table, one entry per sample angle */
    offset,
    *sin_theta,   /* precomputed sine table */
    theta;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;            /* number of angular samples along the blur arc */

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated implementation when one is available. */
  blur_image=AccelerateRadialBlurImage(image,channel,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /* Sample count grows with both the angle and the image diagonal. */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
  cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (MagickRealType *) NULL) ||
      (sin_theta == (MagickRealType *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Center the sample arc on the source angle: offsets span [-angle/2,+angle/2]. */
  offset=theta*(MagickRealType) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      MagickRealType
        normalize,
        radius;

      PixelPacket
        pixel;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels near the center traverse a shorter arc, so sample the
        angle table with a proportionally larger stride.
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      normalize=0.0;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /* Opaque path: accumulate raw channel values along the arc. */
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            qixel.red+=pixel.red;
            qixel.green+=pixel.green;
            qixel.blue+=pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*indexes);
              }
            normalize+=1.0;
          }
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(normalize*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(normalize*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index));
        }
      else
        {
          double
            alpha,
            gamma;

          /* Matte path: alpha-weight colors so transparency is respected. */
          alpha=1.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
            qixel.red+=alpha*pixel.red;
            qixel.green+=alpha*pixel.green;
            qixel.blue+=alpha*pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=alpha*(*indexes);
              }
            gamma+=alpha;
            normalize+=1.0;
          }
          gamma=PerceptibleReciprocal(gamma);
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RotationalBlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
  sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
%  It is similar to the unsharpen mask that sharpens everything with contrast
%  above a certain threshold.
%
%  The format of the SelectiveBlurImage method is:
%
%      Image *SelectiveBlurImage(const Image *image,const double radius,
%        const double sigma,const double threshold,ExceptionInfo *exception)
%      Image *SelectiveBlurImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { Image *blur_image; blur_image=SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma, threshold,exception); return(blur_image); } MagickExport Image *SelectiveBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; double *kernel; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, width*sizeof(*kernel))); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for 
(u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { kernel=(double *) RelinquishAlignedMemory(kernel); InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace); if (status == MagickFalse) { InheritException(exception,&luminance_image->exception); kernel=(double *) RelinquishAlignedMemory(kernel); blur_image=DestroyImage(blur_image); luminance_image=DestroyImage(luminance_image); return((Image *) NULL); } /* Threshold blur image. 
*/ status=MagickTrue; progress=0; center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L)); GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double gamma; MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict l, *magick_restrict p; register IndexPacket *magick_restrict blur_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (l == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { double contrast; DoublePixelPacket pixel; MagickRealType intensity; register const double *magick_restrict k; register ssize_t u; ssize_t j, v; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=kernel; intensity=GetPixelIntensity(image,p+center); gamma=0.0; j=0; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < 
(ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.red+=(*k)*GetPixelRed(p+u+j); pixel.green+=(*k)*GetPixelGreen(p+u+j); pixel.blue+=(*k)*GetPixelBlue(p+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.opacity+=(*k)*(p+u+j)->opacity; gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); } } else { MagickRealType alpha; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j)); pixel.red+=(*k)*alpha*GetPixelRed(p+u+j); pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j); pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j); pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); gamma+=(*k)*alpha; } k++; } j+=(ssize_t) 
(image->columns+width); } if (gamma != 0.0) { gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); k++; } j+=(ssize_t) (image->columns+width); } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(p+u+j)); pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); } } p++; l++; q++; } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImageChannel) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); luminance_view=DestroyCacheView(luminance_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(double *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) 
blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect.  You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%      Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%        const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation:  Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Two clones: one read as the source surface, one written as the result. */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shade_image->exception);
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    MagickRealType
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict s0,
      *magick_restrict s1,
      *magick_restrict s2;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row window (rows y-1..y+1, one extra column each side)
       so the 3x3 gradient stencil below never reads out of bounds. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      /*
        Determine the surface normal and compute shading.
        s0/s1/s2 point at the three stencil rows; the row stride uses
        image->columns, which equals linear_image->columns since
        linear_image is a clone of image.
      */
      s0=p+1;
      s1=s0+image->columns+2;
      s2=s1+image->columns+2;
      normal.x=(double) (GetPixelIntensity(linear_image,s0-1)+
        GetPixelIntensity(linear_image,s1-1)+
        GetPixelIntensity(linear_image,s2-1)-
        GetPixelIntensity(linear_image,s0+1)-
        GetPixelIntensity(linear_image,s1+1)-
        GetPixelIntensity(linear_image,s2+1));
      normal.y=(double) (GetPixelIntensity(linear_image,s2-1)+
        GetPixelIntensity(linear_image,s2)+
        GetPixelIntensity(linear_image,s2+1)-
        GetPixelIntensity(linear_image,s0-1)-
        GetPixelIntensity(linear_image,s0)-
        GetPixelIntensity(linear_image,s0+1));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: full Z-component illumination */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z*
                normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      if (gray != MagickFalse)
        {
          /* NOTE(review): shade is stored without ClampToQuantum here,
             relying on the implicit MagickRealType->Quantum conversion in
             SetPixel* -- confirm this is intentional. */
          SetPixelRed(q,shade);
          SetPixelGreen(q,shade);
          SetPixelBlue(q,shade);
        }
      else
        {
          SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1)));
          SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1)));
          SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1)));
        }
      q->opacity=s1->opacity;  /* opacity passes through unshaded */
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m
a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SharpenImage() selects a suitable radius for you. % % Using a separable kernel would be faster, but the negative weights cancel % out on the corners of the kernel producing often undesirable ringing in the % filtered result; this can be avoided by using a 2D gaussian shaped image % sharpening kernel instead. % % The format of the SharpenImage method is: % % Image *SharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *SharpenImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. 
%
*/

/* Convenience wrapper: sharpen all default channels. */
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *sharp_image;

  sharp_image=SharpenImageChannel(image,DefaultChannels,radius,sigma,exception);
  return(sharp_image);
}

MagickExport Image *SharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a negated 2D Gaussian; the center tap is then overwritten so the
    kernel acts as a sharpening (difference-of-Gaussian style) filter.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(double) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* i == width*width here, so i/2 is the center tap (width is odd). */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* Re-normalize so the kernel sums to 1 (PerceptibleReciprocal guards
     against division by a near-zero sum). */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a block defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius:  Choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&spread_image->exception);
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(spread_image,&bias);
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* One RandomInfo per thread so pseudo-random draws need no locking. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,spread_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) spread_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(spread_view);
    pixel=bias;
    for (x=0; x < (ssize_t) spread_image->columns; x++)
    {
      PointInfo
        point;

      /* Sample a random offset in [-width/2,+width/2) around (x,y). */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolateMagickPixelPacket(image,image_view,image->interpolate,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),&pixel,
        exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(spread_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use
%  a radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double amount,const double threshold,
%        ExceptionInfo *exception)
%      Image *UnsharpMaskImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        const double gain,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: unsharp-mask all default channels. */
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
  Image
    *sharp_image;

  sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain,
    threshold,exception);
  return(sharp_image);
}

MagickExport Image *UnsharpMaskImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double gain,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): exception->signature is not asserted here, unlike the
     sibling functions in this file -- confirm whether that is intentional. */
  assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  unsharp_image=AccelerateUnsharpMaskImage(image,channel,radius,sigma,gain,
    threshold,exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  /* Start from a Gaussian-blurred copy; the loop below adds back the
     thresholded difference between original and blur. */
  unsharp_image=BlurImageChannel(image,(ChannelType) (channel &~ SyncChannels),
    radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(MagickRealType) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    DoublePixelPacket
      pixel;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict unsharp_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
    pixel.red=bias.red;
    pixel.green=bias.green;
    pixel.blue=bias.blue;
    pixel.opacity=bias.opacity;
    pixel.index=bias.index;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Per channel: keep the original value when the original-vs-blur
         difference is below the threshold, otherwise amplify it by gain. */
      if ((channel & RedChannel) != 0)
        {
          pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
          if (fabs(2.0*pixel.red) < quantum_threshold)
            pixel.red=(MagickRealType) GetPixelRed(p);
          else
            pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain);
          SetPixelRed(q,ClampToQuantum(pixel.red));
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel.green=GetPixelGreen(p)-(MagickRealType) q->green;
          if (fabs(2.0*pixel.green) < quantum_threshold)
            pixel.green=(MagickRealType) GetPixelGreen(p);
          else
            pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain);
          SetPixelGreen(q,ClampToQuantum(pixel.green));
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel.blue=GetPixelBlue(p)-(MagickRealType) q->blue;
          if (fabs(2.0*pixel.blue) < quantum_threshold)
            pixel.blue=(MagickRealType) GetPixelBlue(p);
          else
            pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain);
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
        }
      if ((channel & OpacityChannel) != 0)
        {
          pixel.opacity=GetPixelOpacity(p)-(MagickRealType) q->opacity;
          if (fabs(2.0*pixel.opacity) < quantum_threshold)
            pixel.opacity=(MagickRealType) GetPixelOpacity(p);
          else
            pixel.opacity=GetPixelOpacity(p)+(pixel.opacity*gain);
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel.index=GetPixelIndex(indexes+x)-(MagickRealType)
            GetPixelIndex(unsharp_indexes+x);
          if (fabs(2.0*pixel.index) < quantum_threshold)
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x);
          else
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+
              (pixel.index*gain);
          SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UnsharpMaskImageChannel)
#endif
        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
2-8t.c
#include <stdio.h>
#include <omp.h>

/* Demo: request eight OpenMP threads, have every thread print a greeting,
   then print a farewell from the (single) main thread. */
int main(void)
{
    omp_set_num_threads(8);

#pragma omp parallel
    {
        printf(" Hello ");
    }

    printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n");
    return 0;
}
segment_reduce.h
/*! * Copyright (c) 2020 by Contributors * \file array/cpu/spmm.h * \brief Segment reduce kernel function header. */ #ifndef DGL_ARRAY_CPU_SEGMENT_REDUCE_H_ #define DGL_ARRAY_CPU_SEGMENT_REDUCE_H_ #include <dgl/array.h> namespace dgl { namespace aten { namespace cpu { /*! * \brief CPU kernel of segment sum. * \param feat The input tensor. * \param offsets The offset tensor storing the ranges of segments. * \param out The output tensor. */ template <typename IdType, typename DType> void SegmentSum(NDArray feat, NDArray offsets, NDArray out) { int n = out->shape[0]; int dim = 1; for (int i = 1; i < out->ndim; ++i) dim *= out->shape[i]; const DType* feat_data = feat.Ptr<DType>(); const IdType* offsets_data = offsets.Ptr<IdType>(); DType *out_data = out.Ptr<DType>(); #pragma omp parallel for for (int i = 0; i < n; ++i) { for (IdType j = offsets_data[i]; j < offsets_data[i + 1]; ++j) { for (int k = 0; k < dim; ++k) { out_data[i * dim + k] += feat_data[j * dim + k]; } } } } /*! * \brief CPU kernel of segment min/max. * \param feat The input tensor. * \param offsets The offset tensor storing the ranges of segments. * \param out The output tensor. * \param arg An auxiliary tensor storing the argmin/max information * used in backward phase. 
*/ template <typename IdType, typename DType, typename Cmp> void SegmentCmp(NDArray feat, NDArray offsets, NDArray out, NDArray arg) { int n = out->shape[0]; int dim = 1; for (int i = 1; i < out->ndim; ++i) dim *= out->shape[i]; const DType* feat_data = feat.Ptr<DType>(); const IdType* offsets_data = offsets.Ptr<IdType>(); DType *out_data = out.Ptr<DType>(); IdType *arg_data = arg.Ptr<IdType>(); std::fill(out_data, out_data + out.NumElements(), Cmp::zero); std::fill(arg_data, arg_data + arg.NumElements(), -1); #pragma omp parallel for for (int i = 0; i < n; ++i) { for (IdType j = offsets_data[i]; j < offsets_data[i + 1]; ++j) { for (int k = 0; k < dim; ++k) { const DType val = feat_data[j * dim + k]; if (Cmp::Call(out_data[i * dim + k], val)) { out_data[i * dim + k] = val; arg_data[i * dim + k] = j; } } } } } /*! * \brief CPU kernel of backward phase of segment min/max. * \param feat The input tensor. * \param arg The argmin/argmax tensor. * \param out The output tensor. */ template <typename IdType, typename DType> void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) { int n = feat->shape[0]; int dim = 1; for (int i = 1; i < out->ndim; ++i) dim *= out->shape[i]; const DType* feat_data = feat.Ptr<DType>(); const IdType* arg_data = arg.Ptr<IdType>(); DType* out_data = out.Ptr<DType>(); #pragma omp parallel for for (int i = 0; i < n; ++i) { for (int k = 0; k < dim; ++k) { int write_row = arg_data[i * dim + k]; if (write_row >= 0) out_data[write_row * dim + k] = feat_data[i * dim + k]; } } } } // namespace cpu } // namespace aten } // namespace dgl #endif // DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
detector.c
#include "darknet.h" #include <unistd.h> ////0 #include <dirent.h> ////0 #include <stdlib.h> ////0 #include <sys/stat.h> ////0 static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90}; void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear) { list *options = read_data_cfg(datacfg); char *train_images = option_find_str(options, "train", "data/train.list"); char *backup_directory = option_find_str(options, "backup", "/backup/"); srand(time(0)); char *base = basecfg(cfgfile); printf("%s\n", base); float avg_loss = -1; network **nets = calloc(ngpus, sizeof(network)); srand(time(0)); int seed = rand(); int i; for(i = 0; i < ngpus; ++i){ srand(seed); #ifdef GPU cuda_set_device(gpus[i]); #endif nets[i] = load_network(cfgfile, weightfile, clear); nets[i]->learning_rate *= ngpus; } srand(time(0)); network *net = nets[0]; int imgs = net->batch * net->subdivisions * ngpus; printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); data train, buffer; layer l = net->layers[net->n - 1]; int classes = l.classes; float jitter = l.jitter; list *plist = get_paths(train_images); //int N = plist->size; char **paths = (char **)list_to_array(plist); load_args args = get_base_args(net); args.coords = l.coords; args.paths = paths; args.n = imgs; args.m = plist->size; args.classes = classes; args.jitter = jitter; args.num_boxes = l.max_boxes; args.d = &buffer; args.type = DETECTION_DATA; //args.type = INSTANCE_DATA; args.threads = 64; pthread_t load_thread = load_data(args); double time; int count = 0; //while(i*imgs < N*120){ while(get_current_batch(net) < net->max_batches){ if(l.random && count++%10 == 0){ printf("Resizing\n"); int dim = (rand() % 10 + 10) * 32; if (get_current_batch(net)+200 > 
net->max_batches) dim = 608; //int dim = (rand() % 4 + 16) * 32; printf("%d\n", dim); args.w = dim; args.h = dim; pthread_join(load_thread, 0); train = buffer; free_data(train); load_thread = load_data(args); #pragma omp parallel for for(i = 0; i < ngpus; ++i){ resize_network(nets[i], dim, dim); } net = nets[0]; } time=what_time_is_it_now(); pthread_join(load_thread, 0); train = buffer; load_thread = load_data(args); /* int k; for(k = 0; k < l.max_boxes; ++k){ box b = float_to_box(train.y.vals[10] + 1 + k*5); if(!b.x) break; printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h); } */ /* int zz; for(zz = 0; zz < train.X.cols; ++zz){ image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]); int k; for(k = 0; k < l.max_boxes; ++k){ box b = float_to_box(train.y.vals[zz] + k*5, 1); printf("%f %f %f %f\n", b.x, b.y, b.w, b.h); draw_bbox(im, b, 1, 1,0,0); } show_image(im, "truth11"); cvWaitKey(0); save_image(im, "truth11"); } */ printf("Loaded: %lf seconds\n", what_time_is_it_now()-time); time=what_time_is_it_now(); float loss = 0; #ifdef GPU if(ngpus == 1){ loss = train_network(net, train); } else { loss = train_networks(nets, ngpus, train, 4); } #else loss = train_network(net, train); #endif if (avg_loss < 0) avg_loss = loss; avg_loss = avg_loss*.9 + loss*.1; i = get_current_batch(net); printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs); if(i%100==0){ #ifdef GPU if(ngpus != 1) sync_nets(nets, ngpus, 0); #endif char buff[256]; sprintf(buff, "%s/%s.backup", backup_directory, base); save_weights(net, buff); } if(i%10000==0 || (i < 1000 && i%100 == 0)){ #ifdef GPU if(ngpus != 1) sync_nets(nets, ngpus, 0); #endif char buff[256]; sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i); save_weights(net, buff); } free_data(train); } #ifdef GPU if(ngpus != 1) sync_nets(nets, ngpus, 0); #endif char buff[256]; sprintf(buff, "%s/%s_final.weights", backup_directory, 
base);
    save_weights(net, buff);
}

/*
  Extract the numeric COCO image id from a path such as
  ".../COCO_val2014_000000123456.jpg": atoi the text after the last '_'
  (falling back to the text after the last '/' when no '_' is present).
*/
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    return atoi(p+1);
}

/*
  Write detections for one image in COCO JSON result format.  Boxes arrive as
  center/size, are clamped to the image, and are emitted as x,y,w,h.  Every
  object line ends with ",\n"; the caller later rewinds two bytes before
  writing the closing "]" to remove the final trailing comma.
*/
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int i, j;
    int image_id = get_coco_image_id(image_path);
    for(i = 0; i < num_boxes; ++i){
        /* center/size -> corner coordinates */
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        /* clamp to image bounds */
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(j = 0; j < classes; ++j){
            /* float-truthiness: emit only classes with nonzero probability */
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
        }
    }
}

/*
  Write detections in Pascal VOC evaluation format, one file per class:
  "<image id> <score> <xmin> <ymin> <xmax> <ymax>".  The +1 shifts to the
  1-based pixel coordinates VOC expects.
*/
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.
        + 1;

        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax);
        }
    }
}

/*
  Write detections in ImageNet detection format, one line per detection:
  "<image id> <1-based class> <score> <xmin> <ymin> <xmax> <ymax>".
*/
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            int class = j;
            if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class], xmin, ymin, xmax, ymax);
        }
    }
}

/*
  Validate a detector on a list of images, averaging predictions over the
  image and its horizontal flip (batch of 2 per image -- presumably why
  set_batch_network uses 2; confirm against network_predict usage below).
  Output format (COCO JSON / ImageNet / VOC) is chosen by the "eval" option.
*/
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else
if(0==strcmp(type, "imagenet")){ if(!outfile) outfile = "imagenet-detection"; snprintf(buff, 1024, "%s/%s.txt", prefix, outfile); fp = fopen(buff, "w"); imagenet = 1; classes = 200; } else { if(!outfile) outfile = "comp4_det_test_"; fps = calloc(classes, sizeof(FILE *)); for(j = 0; j < classes; ++j){ snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]); fps[j] = fopen(buff, "w"); } } int m = plist->size; int i=0; int t; float thresh = .005; float nms = .45; int nthreads = 4; image *val = calloc(nthreads, sizeof(image)); image *val_resized = calloc(nthreads, sizeof(image)); image *buf = calloc(nthreads, sizeof(image)); image *buf_resized = calloc(nthreads, sizeof(image)); pthread_t *thr = calloc(nthreads, sizeof(pthread_t)); image input = make_image(net->w, net->h, net->c*2); load_args args = {0}; args.w = net->w; args.h = net->h; //args.type = IMAGE_DATA; args.type = LETTERBOX_DATA; for(t = 0; t < nthreads; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } double start = what_time_is_it_now(); for(i = nthreads; i < m+nthreads; i += nthreads){ fprintf(stderr, "%d\n", i); for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ pthread_join(thr[t], 0); val[t] = buf[t]; val_resized[t] = buf_resized[t]; } for(t = 0; t < nthreads && i+t < m; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ char *path = paths[i+t-nthreads]; char *id = basecfg(path); copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1); flip_image(val_resized[t]); copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1); network_predict(net, input.data); int w = val[t].w; int h = val[t].h; int num = 0; detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num); if (nms) do_nms_sort(dets, num, classes, nms); if (coco){ print_cocos(fp, path, dets, 
num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* back up over the trailing ",\n" of the last record and close the JSON array */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Standard (no-augmentation) validation pass: run the detector over the
 * data-cfg "valid" list with a threaded image-loading pipeline and write
 * results in the eval format (coco / imagenet / voc per-class files). */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile,
names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    /* nthreads async image loaders run ahead of inference */
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    /* prime the loader pipeline */
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        /* collect images from the previous round, then start the next */
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* back up over the trailing ",\n" and close the JSON array */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Measure proposal count, mean best-IOU and recall of the raw objectness
 * boxes against ground-truth labels over data/coco_val_5k.list. */
void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = get_paths("data/coco_val_5k.list"); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int j, k; int m = plist->size; int i=0; float thresh = .001; float iou_thresh = .5; float nms = .4; int total = 0; int correct = 0; int proposals = 0; float avg_iou = 0; for(i = 0; i < m; ++i){ char *path = paths[i]; image orig = load_image_color(path, 0, 0); image sized = resize_image(orig, net->w, net->h); char *id = basecfg(path); network_predict(net, sized.data); int nboxes = 0; detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes); if (nms) do_nms_obj(dets, nboxes, 1, nms); char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int num_labels = 0; box_label *truth = read_boxes(labelpath, &num_labels); for(k = 0; k < nboxes; ++k){ if(dets[k].objectness > thresh){ ++proposals; } } for (j = 0; j < num_labels; ++j) { ++total; box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h}; float best_iou = 0; for(k = 0; k < l.w*l.h*l.n; ++k){ float iou = box_iou(dets[k].bbox, t); if(dets[k].objectness > thresh && iou > best_iou){ best_iou = iou; } } avg_iou += best_iou; if(best_iou > iou_thresh){ ++correct; } } fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total); free(id); free_image(orig); free_image(sized); } } void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen, char *idir, char *odir) ////0 //void test_detector(char *datacfg, 
char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen) //1
{
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);

    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
        } else {
            /*printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if(!input) return; strtok(input, "\n");*/ //1
            /////////////0
            if(!idir || !odir) // original behavior: prompt for image paths interactively
            {
                printf("Enter Image Path: ");
                fflush(stdout);
                input = fgets(input, 256, stdin);
                if(!input) return;
                strtok(input, "\n");
            }
            else
            {
                // batch mode: both -idir and -odir arguments were given
                //idir && odir
                char imagepath[512];
                char savedir[512];
                struct dirent *imagename; //readdir return
                DIR *dir; /////////
                dir = opendir(idir); // walk every entry of the input directory
                while((imagename=readdir(dir))!= NULL)
                {
                    // skip the "." and ".." directory entries
                    if(!strcmp(imagename->d_name,".")||!strcmp(imagename->d_name,".."))
                        continue;
                    sprintf(imagepath,"%s%s",idir,imagename->d_name);
                    image im = load_image_color(imagepath, 0, 0);
                    image sized = letterbox_image(im, net->w, net->h);
                    layer l = net->layers[net->n-1];

                    float *X = sized.data;
                    time=what_time_is_it_now();
                    network_predict(net, X);
                    printf("%s: Predicted in %f seconds.\n", imagepath, what_time_is_it_now()-time);
                    int nboxes = 0;
                    detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
                    if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
                    //****modified0612******//
                    //draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
                    draw_detections_person(imagename->d_name, odir, im, dets, nboxes, thresh, names, alphabet, l.classes);
                    free_detections(dets, nboxes);

                    char imagesdir[512];
                    sprintf(imagesdir,"%s%s",odir,"images/");
                    sprintf(savedir,"%s%s",imagesdir,imagename->d_name);
                    //strcat(odir, imagename->d_name);
                    int k = 0;
                    /* strip the trailing ".jpg" (scan back over 'j','p','g','.')
                       so save_image can append its own extension */
                    for (k = strlen(savedir)-1; k>=0; k--)
                    {
                        if((savedir[k]!='j')&&(savedir[k]!='p')&&(savedir[k]!='g')&&(savedir[k]!='.'))
                        {
                            break;
                        }
                        else
                        {
                            savedir[k] = '\0';
                        }
                    }
                    save_image(im, savedir);
                    printf("image saved success!\n");
                    free_image(im);
                    free_image(sized);
                }
                closedir(dir);
                break;
            }
        }

        /* single-image path (interactive or -input) */
        image im = load_image_color(input,0,0);
        image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];

        float *X = sized.data;
        time=what_time_is_it_now();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        //printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        //draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes); //1
        draw_detections_person(input, odir, im, dets, nboxes, thresh, names, alphabet, l.classes); ////0
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            make_window("predictions", 512, 512, 0);
            show_image(im, "predictions", 0);
#endif
        }

        free_image(im);
        free_image(sized);
        if (filename) break;
    }
}

/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
    char *base = basecfg(cfgfile);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);

    srand(2222222);
    CvCapture * cap;

    int w = 1280;
    int h = 720;

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }

    if(w){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH,
w);
    }
    if(h){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
    }
    if(!cap) error("Couldn't connect to webcam.\n");
    cvNamedWindow(base, CV_WINDOW_NORMAL);
    cvResizeWindow(base, 512, 512);
    float fps = 0;
    int i;
    float nms = .45;

    while(1){
        image in = get_image_from_stream(cap);
        //image in_s = resize_image(in, net->w, net->h);
        image in_s = letterbox_image(in, net->w, net->h);
        layer l = net->layers[net->n-1];

        float *X = in_s.data;
        network_predict(net, X);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);

        for(i = 0; i < nboxes; ++i){
            if(dets[i].prob[class] > thresh){
                box b = dets[i].bbox;
                int left  = b.x-b.w/2.;
                int top   = b.y-b.h/2.;
                censor_image(in, left, top, b.w, b.h);
            }
        }
        show_image(in, base);
        cvWaitKey(10);
        free_detections(dets, nboxes);
        free_image(in_s);
        free_image(in);

        float curr = 0;
        fps = .9*fps + .1*curr;
        for(i = 0; i < skip; ++i){
            image in = get_image_from_stream(cap);
            free_image(in);
        }
    }
#endif
}

void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
    char *base = basecfg(cfgfile);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);

    srand(2222222);
    CvCapture * cap;

    int w = 1280;
    int h = 720;

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }

    if(w){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
    }
    if(h){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
    }
    if(!cap) error("Couldn't connect to webcam.\n");
    cvNamedWindow(base, CV_WINDOW_NORMAL);
    cvResizeWindow(base, 512, 512);
    float fps = 0;
    int i;
    int count = 0;
    float nms = .45;

    while(1){
        image in = get_image_from_stream(cap);
        image in_s = letterbox_image(in, net->w, net->h);
        layer l = net->layers[net->n-1];
        show_image(in, base);

        int nboxes = 0;
        float *X = in_s.data;
        network_predict(net, X);
        detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);

        for(i = 0; i < nboxes; ++i){
            if(dets[i].prob[class] > thresh){
                box b = dets[i].bbox;
                int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
                int dx = b.x*in.w-size/2.;
                int dy = b.y*in.h-size/2.;
                image bim = crop_image(in, dx, dy, size, size);
                char buff[2048];
                sprintf(buff, "results/extract/%07d", count);
                ++count;
                save_image(bim, buff);
                free_image(bim);
            }
        }
        free_detections(dets, nboxes);
        free_image(in_s);
        free_image(in);

        float curr = 0;
        fps = .9*fps + .1*curr;
        for(i = 0; i < skip; ++i){
            image in = get_image_from_stream(cap);
            free_image(in);
        }
    }
#endif
}
*/

/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
    network_predict_image(net, im);
    layer l = net->layers[net->n-1];
    int nboxes = num_boxes(net);
    fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
    if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/

/* Command-line entry point: parse the shared detector flags and dispatch to
 * test / train / valid / valid2 / recall / demo. argv[2] selects the mode;
 * argv[3..5] are datacfg, cfg and (optional) weights. */
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        /* parse a comma-separated GPU index list */
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);

    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    //char *filename = (argc > 6) ? argv[6]: 0; //1
    /* this fork takes the image via -input and optional -idir/-odir dirs */
    char *filename = find_char_arg(argc, argv, "-input", 0); ////0
    char *idir = find_char_arg(argc, argv, "-idir", 0); ////0
    char *odir = find_char_arg(argc, argv, "-odir", 0); ////0
    if(0==strcmp(argv[2], "test"))
        //test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen); //1
        test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen, idir, odir); ////0
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
sum_openmp.c
/* Copyright (C) 2018 Francesc Alted http://blosc.org License: BSD 3-Clause (see LICENSE.txt) Example program showing how to operate with compressed buffers. To compile this program for synthetic data (default): $ gcc -fopenmp -O3 sum_openmp.c -o sum_openmp -lblosc2 To run: $ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$) Sum for uncompressed data: 199950000000 Sum time for uncompressed data: 0.0288 s, 26459.3 MB/s Compression ratio: 762.9 MB -> 14.0 MB (54.6x) Compression time: 0.288 s, 2653.5 MB/s Sum for *compressed* data: 199950000000 Sum time for *compressed* data: 0.0188 s, 40653.7 MB/s To use real (rainfall) data: $ gcc -DRAINFALL -fopenmp -Ofast sum_openmp.c -o sum_openmp And running it: $ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$) Sum for uncompressed data: 29741012 Sum time for uncompressed data: 0.0149 s, 25627.4 MB/s Compression ratio: 381.5 MB -> 71.3 MB (5.3x) Compression time: 1.53 s, 249.1 MB/s Sum for *compressed* data: 29741012 Sum time for *compressed* data: 0.0247 s, 15467.5 MB/s */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <sys/stat.h> #include <errno.h> #include <assert.h> #include "blosc2.h" #define KB 1024. 
#define MB (1024*KB) #define GB (1024*MB) #define N (100 * 1000 * 1000) #define CHUNKSIZE (16 * 1000) #define NCHUNKS (N / CHUNKSIZE) #define NTHREADS 8 #define NITER 5 #ifdef RAINFALL #define SYNTHETIC false #else #define SYNTHETIC true #endif #if SYNTHETIC == true #define DTYPE int64_t #define CLEVEL 3 #define CODEC BLOSC_BLOSCLZ #else #define DTYPE float #define CLEVEL 1 #define CODEC BLOSC_LZ4 #endif int main(void) { static DTYPE udata[N]; DTYPE chunk_buf[CHUNKSIZE]; size_t isize = CHUNKSIZE * sizeof(DTYPE); DTYPE sum, compressed_sum; int64_t nbytes, cbytes; blosc2_schunk* schunk; int i, j, nchunk; blosc_timestamp_t last, current; double ttotal, itotal; char* envvar = NULL; printf("Blosc version info: %s (%s)\n", BLOSC_VERSION_STRING, BLOSC_VERSION_DATE); // Fill the buffer for a chunk if (SYNTHETIC) { for (j = 0; j < CHUNKSIZE; j++) { chunk_buf[j] = j; } } else { struct stat info; const char *filegrid = "rainfall-grid-150x150.bin"; if (stat(filegrid, &info) != 0) { printf("Grid file %s not found!", filegrid); exit(1); } char *cdata = malloc(info.st_size); FILE *f = fopen(filegrid, "rb"); size_t blocks_read = fread(cdata, info.st_size, 1, f); assert(blocks_read == 1); fclose(f); int dsize = blosc_getitem(cdata, 0, CHUNKSIZE, chunk_buf); if (dsize < 0) { printf("blosc_getitem() error. Error code: %d\n. 
Probaly reading too much data?", dsize); exit(1); } free(cdata); } // Fill the uncompressed dataset with data chunks for (i = 0; i < N / CHUNKSIZE; i++) { for (j = 0; j < CHUNKSIZE; j++) { udata[i * CHUNKSIZE + j] = chunk_buf[j]; } } // Reduce uncompressed dataset ttotal = 1e10; sum = 0; for (int n = 0; n < NITER; n++) { sum = 0; blosc_set_timestamp(&last); #pragma omp parallel for reduction (+:sum) for (i = 0; i < N; i++) { sum += udata[i]; } blosc_set_timestamp(&current); itotal = blosc_elapsed_secs(last, current); if (itotal < ttotal) ttotal = itotal; } printf("Sum for uncompressed data: %10.0f\n", (double)sum); printf("Sum time for uncompressed data: %.3g s, %.1f MB/s\n", ttotal, (double)(isize * NCHUNKS) / (double)(ttotal * MB)); // Create a super-chunk container for the compressed container long codec = CODEC; envvar = getenv("SUM_COMPRESSOR"); if (envvar != NULL) { codec = blosc_compname_to_compcode(envvar); if (codec < 0) { printf("Unknown compresssor: %s\n", envvar); return 1; } } blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS; cparams.compcode = (uint8_t)codec; long clevel = CLEVEL; envvar = getenv("SUM_CLEVEL"); if (envvar != NULL) { clevel = strtol(envvar, NULL, 10); } cparams.clevel = (uint8_t)clevel; cparams.typesize = sizeof(DTYPE); cparams.nthreads = 1; blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS; dparams.nthreads = 1; blosc_set_timestamp(&last); blosc2_storage storage = {.cparams=&cparams, .dparams=&dparams}; schunk = blosc2_schunk_new(storage); for (nchunk = 0; nchunk < NCHUNKS; nchunk++) { for (i = 0; i < CHUNKSIZE; i++) { chunk_buf[i] = udata[i + nchunk * CHUNKSIZE]; } blosc2_schunk_append_buffer(schunk, chunk_buf, isize); } blosc_set_timestamp(&current); ttotal = blosc_elapsed_secs(last, current); nbytes = schunk->nbytes; cbytes = schunk->cbytes; printf("Compression ratio: %.1f MB -> %.1f MB (%.1fx)\n", nbytes / MB, cbytes / MB, (1. 
* nbytes) / cbytes); printf("Compression time: %.3g s, %.1f MB/s\n", ttotal, nbytes / (ttotal * MB)); int nthreads = NTHREADS; envvar = getenv("OMP_NUM_THREADS"); if (envvar != NULL) { long value; value = strtol(envvar, NULL, 10); if ((value != EINVAL) && (value >= 0)) { nthreads = (int)value; } } // Build buffers and contexts for computations int nchunks_thread = NCHUNKS / nthreads; int remaining_chunks = NCHUNKS - nchunks_thread * nthreads; blosc2_context **dctx = malloc(nthreads * sizeof(void*)); DTYPE** chunk = malloc(nthreads * sizeof(void*)); for (j = 0; j < nthreads; j++) { chunk[j] = malloc(CHUNKSIZE * sizeof(DTYPE)); } // Reduce uncompressed dataset blosc_set_timestamp(&last); ttotal = 1e10; compressed_sum = 0; for (int n = 0; n < NITER; n++) { compressed_sum = 0; #pragma omp parallel for private(nchunk) reduction (+:compressed_sum) for (j = 0; j < nthreads; j++) { dctx[j] = blosc2_create_dctx(dparams); for (nchunk = 0; nchunk < nchunks_thread; nchunk++) { blosc2_decompress_ctx(dctx[j], schunk->data[j * nchunks_thread + nchunk], INT32_MAX, (void*)(chunk[j]), isize); for (i = 0; i < CHUNKSIZE; i++) { compressed_sum += chunk[j][i]; //compressed_sum += i + (j * nchunks_thread + nchunk) * CHUNKSIZE; } } } for (nchunk = NCHUNKS - remaining_chunks; nchunk < NCHUNKS; nchunk++) { blosc2_decompress_ctx(dctx[0], schunk->data[nchunk], INT32_MAX, (void*)(chunk[0]), isize); for (i = 0; i < CHUNKSIZE; i++) { compressed_sum += chunk[0][i]; //compressed_sum += i + nchunk * CHUNKSIZE; } } blosc_set_timestamp(&current); itotal = blosc_elapsed_secs(last, current); if (itotal < ttotal) ttotal = itotal; } printf("Sum for *compressed* data: %10.0f\n", (double)compressed_sum); printf("Sum time for *compressed* data: %.3g s, %.1f MB/s\n", ttotal, nbytes / (ttotal * MB)); //printf("sum, csum: %f, %f\n", sum, compressed_sum); if (SYNTHETIC) { // difficult to fulfill for single precision assert(sum == compressed_sum); } /* Free resources */ blosc2_schunk_free(schunk); return 0; }
illumination.c
#include<stdio.h> void srcillum_helper_float( float* field_ginsu, size_t n, int nthreads) { size_t i; #pragma omp parallel for num_threads(nthreads) for (i = 0; i < n; i++) { field_ginsu[i] = field_ginsu[i]*field_ginsu[i]; } } void srcillum_helper_double( double* field_ginsu, size_t n, int nthreads) { size_t i; #pragma omp parallel for num_threads(nthreads) for (i = 0; i < n; i++) { field_ginsu[i] = field_ginsu[i]*field_ginsu[i]; } } void illum_accumulate_float( float* field_ginsu_accum, float* field_ginsu, size_t n, int nthreads) { size_t i; #pragma omp parallel for num_threads(nthreads) for (i = 0; i < n; i++) { field_ginsu_accum[i] += field_ginsu[i]*field_ginsu[i]; } } void illum_accumulate_double( double* field_ginsu_accum, double* field_ginsu, size_t n, int nthreads) { size_t i; #pragma omp parallel for num_threads(nthreads) for (i = 0; i < n; i++) { field_ginsu_accum[i] += field_ginsu[i]*field_ginsu[i]; } }
GB_unaryop__one_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_uint16_uint16 // op(A') function: GB_tran__one_uint16_uint16 // C type: uint16_t // A type: uint16_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_uint16_uint16 ( uint16_t *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_uint16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
LAGraph_BF_full.c
//------------------------------------------------------------------------------ // LAGraph_BF_full.c: Bellman-Ford single-source shortest paths, returns tree //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause // // See additional acknowledgments in the LICENSE file, // or contact permission@sei.cmu.edu for the full terms. //------------------------------------------------------------------------------ // LAGraph_BF_full: Bellman-Ford single source shortest paths, returning both // the path lengths and the shortest-path tree. contributed by Jinhao Chen and // Tim Davis, Texas A&M. // LAGraph_BF_full performs a Bellman-Ford to find out shortest path, parent // nodes along the path and the hops (number of edges) in the path from given // source vertex s in the range of [0, n) on graph given as matrix A with size // n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i // to vertex j with weight w, then A(i, j) = w. Furthermore, LAGraph_BF_full // requires A(i, i) = 0 for all 0 <= i < n. // TODO: think about the return values // LAGraph_BF_full returns GrB_SUCCESS regardless of existence of negative- // weight cycle. However, the GrB_Vector d(k), pi(k) and h(k) (i.e., // *pd_output, *ppi_output and *ph_output respectively) will be NULL when // negative-weight cycle detected. Otherwise, the vector d has d(k) as the // shortest distance from s to k. pi(k) = p+1, where p is the parent node of // k-th node in the shortest path. In particular, pi(s) = 0. h(k) = hop(s, k), // the number of edges from s to k in the shortest path. 
//------------------------------------------------------------------------------

// free every workspace/output object; safe to invoke on partially-built state
#define LAGraph_FREE_ALL                   \
{                                          \
    GrB_free(&d);                          \
    GrB_free(&dtmp);                       \
    GrB_free(&Atmp);                       \
    GrB_free(&BF_Tuple3);                  \
    GrB_free(&BF_lMIN_Tuple3);             \
    GrB_free(&BF_PLUSrhs_Tuple3);          \
    GrB_free(&BF_EQ_Tuple3);               \
    GrB_free(&BF_lMIN_Tuple3_Monoid);      \
    GrB_free(&BF_lMIN_PLUSrhs_Tuple3);     \
    LAGraph_Free ((void**)&I);             \
    LAGraph_Free ((void**)&J);             \
    LAGraph_Free ((void**)&w);             \
    LAGraph_Free ((void**)&W);             \
    LAGraph_Free ((void**)&h);             \
    LAGraph_Free ((void**)&pi);            \
}

#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h>  // from src/utility

typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;

//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
    double w;    // w corresponds to a path weight.
    GrB_Index h; // h corresponds to a path size or number of hops.
    GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
                 // vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
                 // for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;

//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------

// lexicographic minimum: smaller weight wins, ties broken by fewer hops,
// then by smaller parent index; serves as the "min" of the semiring
void BF_lMIN
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    if (x->w < y->w
        || (x->w == y->w && x->h < y->h)
        || (x->w == y->w && x->h == y->h && x->pi < y->pi))
    {
        if (z != x) { *z = *x; }
    }
    else
    {
        *z = *y;
    }
}

// "plus" of the semiring: extend path x by edge y; weights and hop counts
// add, and the parent becomes y's unless x is nonexistent (pi == UINT64_MAX)
// or y is a self-loop (pi == 0)
void BF_PLUSrhs
(
    BF_Tuple3_struct *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    z->w = x->w + y->w;
    z->h = x->h + y->h;
    if (x->pi != UINT64_MAX && y->pi != 0)
    {
        z->pi = y->pi;
    }
    else
    {
        z->pi = x->pi;
    }
}

// componentwise equality of two tuples (used to detect convergence)
void BF_EQ
(
    bool *z,
    const BF_Tuple3_struct *x,
    const BF_Tuple3_struct *y
)
{
    if (x->w == y->w && x->h == y->h && x->pi == y->pi)
    {
        *z = true;
    }
    else
    {
        *z = false;
    }
}

// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negative-weight cycle.
// Single-source Bellman-Ford over the lexicographic-min / tuple-plus semiring.
// pd_output  : pointer to a GrB_Vector; entry i is d(s,i), the summed edge
//              length of the shortest path from s to i
// ppi_output : pointer to a GrB_Vector; entry i is pi(i), the parent of vertex
//              i on that shortest path
// ph_output  : pointer to a GrB_Vector; entry i is h(s,i), the number of edges
//              from s to i on that shortest path
// A must have explicit zeros on the diagonal and edge weights elsewhere.
// s is the index of the source vertex.
// On a reachable negative-weight cycle the function returns GrB_SUCCESS with
// all three outputs left NULL — callers must check for NULL, not just status.
GrB_Info LAGraph_BF_full
(
    GrB_Vector *pd_output,      //the pointer to the vector of distance
    GrB_Vector *ppi_output,     //the pointer to the vector of parent
    GrB_Vector *ph_output,      //the pointer to the vector of hops
    const GrB_Matrix A,         //matrix for the graph
    const GrB_Index s           //given index of the source
)
{
    GrB_Info info;
    // tmp vector to store distance vector after n (i.e., V) loops
    GrB_Vector d = NULL, dtmp = NULL;
    GrB_Matrix Atmp = NULL;
    GrB_Type BF_Tuple3;
    GrB_BinaryOp BF_lMIN_Tuple3;
    GrB_BinaryOp BF_PLUSrhs_Tuple3;
    GrB_BinaryOp BF_EQ_Tuple3;
    GrB_Monoid BF_lMIN_Tuple3_Monoid;
    GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
    GrB_Index nrows, ncols, n, nz;  // n = # of row/col, nz = # of nnz in graph
    GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
    GrB_Index *h = NULL, *pi = NULL;
    double *w = NULL;
    BF_Tuple3_struct *W = NULL;

    if (A == NULL || pd_output == NULL ||
        ppi_output == NULL || ph_output == NULL)
    {
        // required argument is missing
        LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
    }

    // outputs are NULL until success — the negative-cycle path leaves them so
    *pd_output  = NULL;
    *ppi_output = NULL;
    *ph_output  = NULL;
    LAGRAPH_OK (GrB_Matrix_nrows (&nrows, A)) ;
    LAGRAPH_OK (GrB_Matrix_ncols (&ncols, A)) ;
    LAGRAPH_OK (GrB_Matrix_nvals (&nz, A));
    if (nrows != ncols)
    {
        // A must be square
        LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
    }
    n = nrows;
    // NOTE(review): GrB_Index is an unsigned type in GraphBLAS, so the
    // `s < 0` arm can never be true — confirm and consider removing.
    if (s >= n || s < 0)
    {
        LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
    }

    //--------------------------------------------------------------------------
    // create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
    //--------------------------------------------------------------------------
    // GrB_Type
    LAGRAPH_OK (GrB_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct)));

    // GrB_BinaryOp
    LAGRAPH_OK (GrB_BinaryOp_new(&BF_EQ_Tuple3,
        (LAGraph_binary_function) (&BF_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3));
    LAGRAPH_OK (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
        (LAGraph_binary_function) (&BF_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3));
    LAGRAPH_OK (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
        (LAGraph_binary_function)(&BF_PLUSrhs), BF_Tuple3, BF_Tuple3, BF_Tuple3));

    // GrB_Monoid: identity is the "no path" tuple
    BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
        .h = UINT64_MAX, .pi = UINT64_MAX };
    LAGRAPH_OK (GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
        &BF_identity));

    //GrB_Semiring
    LAGRAPH_OK (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
        BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));

    //--------------------------------------------------------------------------
    // allocate arrays used for tuplets
    //--------------------------------------------------------------------------
    I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    w = LAGraph_Malloc (nz, sizeof(double)) ;
    W = LAGraph_Malloc (nz, sizeof(BF_Tuple3_struct)) ;
    if (I == NULL || J == NULL || w == NULL || W == NULL)
    {
        LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // create matrix Atmp based on A, while its entries become BF_Tuple3 type
    //--------------------------------------------------------------------------
    LAGRAPH_OK (GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
    int nthreads;
    LAGRAPH_OK (LAGraph_GetNumThreads (&nthreads, NULL)) ;
    printf ("nthreads %d\n", nthreads) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (GrB_Index k = 0; k < nz; k++)
    {
        if (w[k] == 0)            //diagonal entries
        {
            W[k] = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
        }
        else
        {
            // parent is the 1-based row index of the edge's tail
            W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
        }
    }
    LAGRAPH_OK (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
    LAGRAPH_OK (GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));

    //--------------------------------------------------------------------------
    // create and initialize "distance" vector d
    //--------------------------------------------------------------------------
    LAGRAPH_OK (GrB_Vector_new(&d, BF_Tuple3, n));
    // initial distance from s to itself
    BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
    LAGRAPH_OK (GrB_Vector_setElement_UDT(d, &d0, s));

    //--------------------------------------------------------------------------
    // start the Bellman Ford process
    //--------------------------------------------------------------------------
    // copy d to dtmp in order to create a same size of vector
    LAGRAPH_OK (GrB_Vector_dup(&dtmp, d));
    bool same= false;          // variable indicating if d == dtmp
    int64_t iter = 0;          // number of iterations

    // terminate when no new path is found or more than V-1 loops
    while (!same && iter < n - 1)
    {
        // execute semiring on d and A, and save the result to dtmp
        LAGRAPH_OK (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
            d, Atmp, GrB_NULL));
        LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3,
            NULL));
        if (!same)
        {
            // pointer swap instead of copying the vector contents
            GrB_Vector ttmp = dtmp;
            dtmp = d;
            d = ttmp;
        }
        iter ++;
    }

    // check for negative-weight cycle only when there was a new path in the
    // last loop, otherwise, there can't be a negative-weight cycle.
    if (!same)
    {
        // execute semiring again to check for negative-weight cycle
        LAGRAPH_OK (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
            d, Atmp, GrB_NULL));

        // if d != dtmp, then there is a negative-weight cycle in the graph
        LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3,
            NULL));
        if (!same)
        {
            // printf("A negative-weight cycle found. \n");
            // deliberate: success status, NULL outputs, signal the cycle
            LAGraph_FREE_ALL;
            return (GrB_SUCCESS) ;
        }
    }

    //--------------------------------------------------------------------------
    // extract tuple from "distance" vector d and create GrB_Vectors for output
    //--------------------------------------------------------------------------
    // nz is refreshed by the extract; h/pi are sized from the updated count
    LAGRAPH_OK (GrB_Vector_extractTuples_UDT (I, (void *) W, &nz, d));
    h  = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    pi = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
    if (w == NULL || h == NULL || pi == NULL)
    {
        LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
    }

    for (GrB_Index k = 0; k < nz; k++)
    {
        w [k] = W[k].w ;
        h [k] = W[k].h ;
        pi[k] = W[k].pi;
    }
    LAGRAPH_OK (GrB_Vector_new(pd_output,  GrB_FP64,   n));
    LAGRAPH_OK (GrB_Vector_new(ppi_output, GrB_UINT64, n));
    LAGRAPH_OK (GrB_Vector_new(ph_output,  GrB_UINT64, n));
    LAGRAPH_OK (GrB_Vector_build_FP64  (*pd_output , I, w , nz,GrB_MIN_FP64  ));
    LAGRAPH_OK (GrB_Vector_build_UINT64(*ppi_output, I, pi, nz,GrB_MIN_UINT64));
    LAGRAPH_OK (GrB_Vector_build_UINT64(*ph_output , I, h , nz,GrB_MIN_UINT64));
    LAGraph_FREE_ALL;
    return (GrB_SUCCESS) ;
}
/* ===== file: hdp_math_utils.c ===== */
#include <math.h> #include <tgmath.h> #include <stdlib.h> #include <stdio.h> #include <float.h> #include <stdbool.h> #include <inttypes.h> #include "hdp_math_utils.h" #include "sonLib.h" #define LOG_ROOT_PI 0.572364942924700087071713 #define LOG_4 1.386294361119890618834464 #ifndef M_PI #define M_PI 3.14159265358979323846264338 #endif #ifndef EULER_MASCHERONI #define EULER_MASCHERONI 0.57721566490153286060651209008240243 #endif #ifndef MACHEP #define MACHEP 1.11022302462515654042E-16 #endif #ifndef MINUS_INF #define MINUS_INF -0.5 * DBL_MAX #endif void parallel_cdf(double* cdf, double* probs, int64_t length, int64_t chunk_size) { if (2 * chunk_size >= length) { double cumul = 0.0; for (int64_t i = 0; i < length; i++) { cumul += probs[i]; cdf[i] = cumul; } return; } int64_t num_chunks = (length - 1) / chunk_size + 1; #pragma omp parallel for shared(cdf,probs) for (int64_t i = 0; i < num_chunks; i++) { int64_t start = i * chunk_size; int64_t stop = start + chunk_size; if (stop > length) { stop = length; } double partial_cumul = 0.0; for (int64_t j = start; j < stop; j++) { partial_cumul += probs[j]; cdf[j] = partial_cumul; } } double* partial_sums = (double*) malloc(sizeof(double) * num_chunks); double partial_sums_cumul = 0.0; for (int64_t i = chunk_size - 1; i < length; i += chunk_size) { partial_sums_cumul += cdf[i]; partial_sums[i / chunk_size] = partial_sums_cumul; } #pragma omp parallel for shared(cdf,partial_sums) for (int64_t i = chunk_size; i < length; i++) { cdf[i] += partial_sums[i / chunk_size - 1]; } free(partial_sums); } double parallel_max(double* x, int64_t length) { double max_val = MINUS_INF; #pragma omp parallel shared(max_val) { double local_max = MINUS_INF; #pragma omp for nowait for (int64_t i = 0; i < length; i++) { if (x[i] > local_max) { local_max = x[i]; } } #pragma omp critical { if (local_max > max_val) { max_val = local_max; } } } return max_val; } void parallel_add(double add_val, double* x, int64_t length) { #pragma omp parallel for for 
(int64_t i = 0; i < length; i++) { x[i] += add_val; } } void parallel_exp(double* x, int64_t length) { #pragma omp parallel for for (int64_t i = 0; i < length; i++) { x[i] = exp(x[i]); } } typedef struct LogGammaHalfMemo LogGammaHalfMemo; struct LogGammaHalfMemo { double alpha; double* zero_offset_memo; int64_t zero_offset_final_entry; int64_t zero_offset_length; double* half_offset_memo; int64_t half_offset_final_entry; int64_t half_offset_length; }; LogGammaHalfMemo* new_log_gamma_memo(double alpha) { LogGammaHalfMemo* memo = (LogGammaHalfMemo*) malloc(sizeof(LogGammaHalfMemo)); memo->alpha = alpha; double* zero_base_case = (double*) malloc(sizeof(double)); zero_base_case[0] = lgamma(alpha); memo->zero_offset_final_entry = 0; memo->zero_offset_memo = zero_base_case; memo->zero_offset_length = 1; double* half_base_case = (double*) malloc(sizeof(double)); half_base_case[0] = lgamma(alpha + .5); memo->half_offset_final_entry = 0; memo->half_offset_memo = half_base_case; memo->half_offset_length = 1; return memo; } void destroy_log_gamma_memo(LogGammaHalfMemo* memo) { free(memo->half_offset_memo); free(memo->zero_offset_memo); free(memo); } void extend_gamma_zero_offset_memo(LogGammaHalfMemo* memo) { int64_t final_entry = memo->half_offset_final_entry + 1; memo->zero_offset_final_entry = final_entry; double* current_array = memo->zero_offset_memo; int64_t current_length = memo->zero_offset_length; if (current_length == final_entry) { int64_t new_array_length = current_length * 2; double* new_array = (double*) malloc(sizeof(double) * new_array_length); for (int64_t i = 0; i < current_length; i++) { new_array[i] = current_array[i]; } memo->zero_offset_length = new_array_length; memo->zero_offset_memo = new_array; free(current_array); current_array = new_array; } double log_term = log(memo->alpha - 1.0 + (double) final_entry); current_array[final_entry] = current_array[final_entry - 1] + log_term; } void extend_gamma_half_offset_memo(LogGammaHalfMemo* memo) { int64_t 
final_entry = memo->half_offset_final_entry + 1; memo->half_offset_final_entry = final_entry; double* current_array = memo->half_offset_memo; int64_t current_length = memo->half_offset_length; if (current_length == final_entry) { int64_t new_array_length = current_length * 2; double* new_array = (double*) malloc(sizeof(double) * new_array_length); for (int64_t i = 0; i < current_length; i++) { new_array[i] = current_array[i]; } memo->half_offset_length = new_array_length; memo->half_offset_memo = new_array; free(current_array); current_array = new_array; } double log_term = log(memo->alpha -.5 + (double) final_entry); current_array[final_entry] = current_array[final_entry - 1] + log_term; } // returns log(Gamma(memo->alpha + n / 2)) double offset_log_gamma_half(int64_t n, LogGammaHalfMemo* memo) { int64_t idx = n / 2; if (n % 2 == 0) { while (memo->zero_offset_final_entry < idx) { extend_gamma_zero_offset_memo(memo); } return memo->zero_offset_memo[idx]; } else { while (memo->half_offset_final_entry < idx) { extend_gamma_half_offset_memo(memo); } return memo->half_offset_memo[idx]; } } struct SumOfLogsMemo { double* memo_array; int64_t final_entry; int64_t array_length; }; SumOfLogsMemo* new_log_sum_memo() { SumOfLogsMemo* memo = (SumOfLogsMemo*) malloc(sizeof(SumOfLogsMemo)); double* base_case = (double*) malloc(sizeof(double)); base_case[0] = 0.0; memo->memo_array = base_case; memo->final_entry = 1; memo->array_length = 1; return memo; } void destroy_log_sum_memo(SumOfLogsMemo* memo) { free(memo->memo_array); free(memo); } void extend_log_sum_memo(SumOfLogsMemo* memo) { int64_t final_entry = memo->final_entry; int64_t current_length = memo->array_length; if (current_length == final_entry) { double* current_array = memo->memo_array; int64_t new_array_length = current_length * 2; double* new_array = (double*) malloc(sizeof(double) * new_array_length); for (int64_t i = 0; i < current_length; i++) { new_array[i] = current_array[i]; } memo->array_length = 
new_array_length; memo->memo_array = new_array; free(current_array); } double log_term = log((double) final_entry + 1); memo->memo_array[final_entry] = memo->memo_array[final_entry - 1] + log_term; (memo->final_entry)++; } double sum_of_logs(SumOfLogsMemo* memo, int64_t n) { while (n > memo->final_entry) { extend_log_sum_memo(memo); } return memo->memo_array[n - 1]; } // returns log(Gamma(n / 2)) in amortized constant time with low risk of overflow double log_gamma_half(int64_t n, SumOfLogsMemo* sum_of_logs_memo) { if (n <= 2) { fprintf(stderr, "log_gamma_half only supports n > 2\n"); exit(EXIT_FAILURE); } if (n % 2 == 0) { return sum_of_logs(sum_of_logs_memo, n / 2 - 1); } else { return LOG_ROOT_PI - (n / 2) * LOG_4 + sum_of_logs(sum_of_logs_memo, n - 1) - sum_of_logs(sum_of_logs_memo, n / 2); } } // returns log(x + y) without leaving log transformed space double add_logs(double log_x, double log_y) { if (log_x > log_y) { return log_x + log(1.0 + exp(log_y - log_x)); } else { return log_y + log(1.0 + exp(log_x - log_y)); } } // quick-select algorithm on array copy (does not alter original array) double quickselect(double* arr, int64_t length, int64_t target_idx) { if (target_idx < 0 || target_idx >= length) { fprintf(stderr, "Order statistic outside of array bounds\n"); exit(EXIT_FAILURE); } double* arr_copy = (double*) malloc(sizeof(double) * length); for (int64_t i = 0; i < length; i++ ) { arr_copy[i] = arr[i]; } int64_t low = 0; int64_t hi = length - 1; int64_t mid; int64_t median; double temp; while (true) { // median of three technique mid = (hi + low) / 2; if (arr_copy[hi] > arr_copy[mid]) { if (arr_copy[hi] > arr_copy[low]) { if (arr_copy[mid] > arr_copy[low]) { median = mid; } else { median = low; } } else { median = hi; } } else { if (arr_copy[hi] > arr_copy[low]) { median = hi; } else { if (arr_copy[mid] > arr_copy[low]) { median = low; } else { median = mid; } } } // remove pivot temp = arr_copy[median]; arr_copy[median] = arr_copy[hi]; arr_copy[hi] = 
temp; // partition array int64_t pivot = low; for (int64_t i = low; i < hi; i++) { if (arr_copy[i] < arr_copy[hi]) { temp = arr_copy[i]; arr_copy[i] = arr_copy[pivot]; arr_copy[pivot] = temp; pivot++; } } temp = arr_copy[pivot]; arr_copy[pivot] = arr_copy[hi]; arr_copy[hi] = temp; if (pivot == target_idx) { return arr_copy[pivot]; } else if (pivot < target_idx) { low = pivot + 1; } else { hi = pivot - 1; } } } double median(double* arr, int64_t length) { return quickselect(arr, length, length / 2); } double max(double* arr, int64_t length) { double curr_max = arr[0]; for (int64_t i = 1; i < length; i++) { if (arr[i] > curr_max) { curr_max = arr[i]; } } return curr_max; } // returns the index of the first element of arr greater or equal to x, assuming arr is sorted // returns final index if x is greater than all elements of arr int64_t bisect_left(double x, double* arr, int64_t length) { if (x <= arr[0]) { return 0; } int64_t low = 0; int64_t hi = length - 1; int64_t mid; double arr_mid; while (hi > low + 1) { mid = (hi + low) / 2; arr_mid = arr[mid]; if (x <= arr_mid) { hi = mid; } else { low = mid; } } return hi; } void spline_knot_slopes_internal(double* x, double* y, double* k, int64_t idx, double center_coef_prev, double right_coef_prev, double rhs_prev, int64_t final_idx) { if (idx == final_idx) { double left_coef = 1.0 / (x[idx] - x[idx - 1]); double center_coef = 2.0 * left_coef; double rhs = 3.0 * (y[idx] - y[idx - 1]) * left_coef * left_coef; // Cramer's rule k[idx] = (rhs * center_coef_prev - rhs_prev * left_coef) / (center_coef * center_coef_prev - right_coef_prev * left_coef); return; } double left_coef = 1.0 / (x[idx] - x[idx - 1]); double right_coef = 1.0 / (x[idx + 1] - x[idx]); double center_coef = 2.0 * (left_coef + right_coef); double rhs = 3.0 * ((y[idx] - y[idx - 1]) * left_coef * left_coef + (y[idx + 1] - y[idx]) * right_coef * right_coef); center_coef -= left_coef * right_coef_prev / center_coef_prev; rhs -= left_coef * rhs_prev / 
center_coef_prev; spline_knot_slopes_internal(x, y, k, idx + 1, center_coef, right_coef, rhs, final_idx); k[idx] = (rhs - right_coef * k[idx + 1]) / center_coef; } double* spline_knot_slopes(double* x, double* y, int64_t length) { double* k = (double*) malloc(sizeof(double) * length); double right_coef = 1.0 / (x[1] - x[0]); double center_coef = 2.0 * right_coef; double rhs = 3.0 * (y[1] - y[0]) * right_coef * right_coef; spline_knot_slopes_internal(x, y, k, 1, center_coef, right_coef, rhs, length - 1); k[0] = (rhs - right_coef * k[1]) / center_coef; return k; } double spline_interp(double query_x, double* x, double* y, double* slope, int64_t length) { if (query_x <= x[0]) { return y[0] - slope[0] * (x[0] - query_x); } else if (query_x >= x[length - 1]) { int64_t n = length - 1; return y[n] + slope[n] * (query_x - x[n]); } else { int64_t idx_right = bisect_left(query_x, x, length); int64_t idx_left = idx_right - 1; double dx = x[idx_right] - x[idx_left]; double dy = y[idx_right] - y[idx_left]; double a = slope[idx_left] * dx - dy; double b = dy - slope[idx_right] * dx; double t_left = (query_x - x[idx_left]) / dx; double t_right = 1.0 - t_left; return t_right * y[idx_left] + t_left * y[idx_right] + t_left * t_right * (a * t_right + b * t_left); } } // assumes even spacing of x points double grid_spline_interp(double query_x, double* x, double* y, double* slope, int64_t length) { if (query_x <= x[0]) { return y[0] - slope[0] * (x[0] - query_x); } else if (query_x >= x[length - 1]) { int64_t n = length - 1; return y[n] + slope[n] * (query_x - x[n]); } else { double dx = x[1] - x[0]; int64_t idx_left = (int64_t) ((query_x - x[0]) / dx); int64_t idx_right = idx_left + 1; double dy = y[idx_right] - y[idx_left]; double a = slope[idx_left] * dx - dy; double b = dy - slope[idx_right] * dx; double t_left = (query_x - x[idx_left]) / dx; double t_right = 1.0 - t_left; return t_right * y[idx_left] + t_left * y[idx_right] + t_left * t_right * (a * t_right + b * t_left); } } 
// length evenly spaced points from start to stop inclusive; caller frees.
double* linspace(double start, double stop, int64_t length) {
    if (start >= stop) {
        fprintf(stderr, "linspace requires stop > start\n");
        exit(EXIT_FAILURE);
    }
    double* lin = (double*) malloc(sizeof(double) * length);
    int64_t n = length - 1;
    double dx = (stop - start) / ((double) n);
    for (int64_t i = 0; i < n; i++) {
        lin[i] = start + i * dx;
    }
    // write the endpoint exactly rather than via accumulated increments
    lin[n] = stop;
    return lin;
}

// uniform draw on [0, 1] using rand(); not suitable for cryptographic use
double rand_standard_uniform() {
    return ((double) rand()) / ((double) RAND_MAX);
}

// uniform draw on [0, a]
double rand_uniform(double a) {
    return ((double) rand()) / ((double) RAND_MAX / a);
}

// Bernoulli(p) draw
bool rand_bernoulli(double p) {
    return (rand_standard_uniform() < p);
}

// exponential draw with rate lambda via inverse-CDF; rejects draw == 1.0 so
// the log argument stays positive
double rand_exponential(double lambda) {
    double draw;
    do {
        draw = rand_standard_uniform();
    } while (draw == 1.0);
    return -log(1.0 - draw) / lambda;
}

// log of the normal-inverse-gamma posterior normalization term
double log_posterior_conditional_term(double nu_post, double two_alpha_post,
                                      double beta_post) {//, SumOfLogsMemo* memo) {
    // memoized variant kept for reference:
    // return log_gamma_half((int64_t) two_alpha_post, memo)
    //        - .5 * (log(nu_post) + two_alpha_post * log(beta_post));
    return lgamma( 0.5 * two_alpha_post)
           - .5 * (log(nu_post) + two_alpha_post * log(beta_post));
}

// Method-of-moments style parameters of a normal-inverse-gamma distribution
// fit to the sample x: mu = sample mean, nu = sample size,
// alpha = (n - 1) / 2, beta = half the sum of squared deviations.
void normal_inverse_gamma_params(double* x, int64_t length, double* mu_out,
                                 double* nu_out, double* alpha_out,
                                 double* beta_out) {
    double mean = 0.0;
    for (int64_t i = 0; i < length; i++) {
        mean += x[i];
    }
    mean /= (double) length;

    double dev;
    double sum_sq_devs = 0.0;
    for (int64_t i = 0; i < length; i++) {
        dev = x[i] - mean;
        sum_sq_devs += dev * dev;
    }

    *mu_out = mean;
    *nu_out = (double) length;
    *alpha_out = ((double) length - 1.0) / 2.0;
    *beta_out = .5 * sum_sq_devs;
}

// Bernoulli-number coefficients for the digamma asymptotic expansion
static double A_digamma[] = {
    8.33333333333333333333E-2,
    -2.10927960927960927961E-2,
    7.57575757575757575758E-3,
    -4.16666666666666666667E-3,
    3.96825396825396825397E-3,
    -8.33333333333333333333E-3,
    8.33333333333333333333E-2
};

// modified from Scipy source: https://github.com/scipy/scipy/blob/master/scipy/special/cephes/psi.c
// Horner evaluation of a degree-N polynomial with coefficients coef[0..N]
static double polevl(double x, double coef[], int N) {
    double ans;
    int i;
    double *p;

    p = coef;
    ans = *p++;
    i = N;

    do
        ans = ans * x + *p++;
    while (--i);

    return (ans);
}

// digamma (psi) function; exits on non-positive-integer singularities.
// Negative arguments use the reflection formula, small positive integers a
// harmonic-number shortcut, and large arguments the asymptotic series.
double digamma(double x) {
    double p, q, nz, s, w, y, z;
    int i, n, negative;

    negative = 0;
    nz = 0.0;

    if (x <= 0.0) {
        negative = 1;
        q = x;
        p = floor(q);
        if (p == q) {
            fprintf(stderr, "Digamma evaluated at singularity.\n");
            exit(EXIT_FAILURE);
        }
        /* Remove the zeros of tan(NPY_PI x)
         * by subtracting the nearest integer from x
         */
        nz = q - p;
        if (nz != 0.5) {
            if (nz > 0.5) {
                p += 1.0;
                nz = q - p;
            }
            nz = M_PI / tan(M_PI * nz);
        }
        else {
            nz = 0.0;
        }
        x = 1.0 - x;
    }

    /* check for positive integer up to 10 */
    if ((x <= 10.0) && (x == floor(x))) {
        y = 0.0;
        n = x;
        for (i = 1; i < n; i++) {
            w = i;
            y += 1.0 / w;
        }
        y -= EULER_MASCHERONI;
        goto digamma_done;
    }

    // shift x upward until the asymptotic series is accurate
    s = x;
    w = 0.0;
    while (s < 10.0) {
        w += 1.0 / s;
        s += 1.0;
    }

    if (s < 1.0e17) {
        z = 1.0 / (s * s);
        y = z * polevl(z, A_digamma, 6);
    }
    else
        y = 0.0;

    y = log(s) - (0.5 / s) - y - w;

digamma_done:

    if (negative) {
        y -= nz;
    }

    return y;
}

// modified from SciPy source: https://github.com/scipy/scipy/blob/master/scipy/special/cephes/zeta.c
// Euler-Maclaurin expansion coefficients (B_2k scaled); see comments for the
// original rational values
static double A_zeta[] = {
    12.0,
    -720.0,
    30240.0,
    -1209600.0,
    47900160.0,
    -1.8924375803183791606e9,   /*1.307674368e12/691 */
    7.47242496e10,
    -2.950130727918164224e12,   /*1.067062284288e16/3617 */
    1.1646782814350067249e14,   /*5.109094217170944e18/43867 */
    -4.5979787224074726105e15,  /*8.028576626982912e20/174611 */
    1.8152105401943546773e17,   /*1.5511210043330985984e23/854513 */
    -7.1661652561756670113e18   /*1.6938241367317436694528e27/236364091 */
};

// Hurwitz zeta function zeta(x, q); exits on x <= 1 domain errors and on
// singularities (x == 1, or q a non-positive integer).
double hurwitz_zeta(double x, double q) {
    int i;
    double a, b, k, s, t, w;

    if (x == 1.0)
        goto retinf;

    if (x < 1.0) {
      domerr:
        fprintf(stderr, "Domain error in zeta function.\n");
        exit(EXIT_FAILURE);
    }

    if (q <= 0.0) {
        if (q == floor(q)) {
          retinf:
            fprintf(stderr, "Evaluted zeta function at singularity.\n");
            exit(EXIT_FAILURE);
        }
        if (x != floor(x))
            goto domerr;        /* because q^-x not defined */
    }

    /* Asymptotic expansion
     * http://dlmf.nist.gov/25.11#E43
     */
    if (q > 1e8) {
        return (1/(x - 1) + 1/(2*q)) * pow(q, 1 - x);
    }

    /* Euler-Maclaurin summation formula */

    /* Permit negative q but continue sum until n+q > +9 .
     * This case should be handled by a reflection formula.
     * If q<0 and x is an integer, there is a relation to
     * the polyGamma function.
     */
    s = pow(q, -x);
    a = q;
    i = 0;
    b = 0.0;
    while ((i < 9) || (a <= 9.0)) {
        i += 1;
        a += 1.0;
        b = pow(a, -x);
        s += b;
        if (fabs(b / s) < MACHEP)
            goto zeta_done;
    }

    w = a;
    s += b * w / (x - 1.0);
    s -= 0.5 * b;
    a = 1.0;
    k = 0.0;
    for (i = 0; i < 12; i++) {
        a *= x + k;
        b /= w;
        t = a * b / A_zeta[i];
        s = s + t;
        t = fabs(t / s);
        if (t < MACHEP)
            goto zeta_done;
        k += 1.0;
        a *= x + k;
        b /= w;
        k += 1.0;
    }

  zeta_done:
    return (s);
}

// trigamma (psi') via the identity psi_1(x) = zeta(2, x)
double trigamma(double x) {
    return hurwitz_zeta(2.0, x);
}

// Newton iteration for the gamma-distribution shape parameter alpha solving
// log(alpha) - digamma(alpha) = log(mean tau) - mean(log tau).
// The df_alpha != df_alpha test is a NaN check.
double newton_approx_alpha(int64_t length, double sum_log_tau, double sum_tau) {
    double constant = sum_log_tau / length - log( sum_tau / length);
    double alpha = (double) 1.0;
    double f_alpha;
    double df_alpha;
    double alpha_prime;
    while (true) {
        f_alpha = log(alpha) - digamma(alpha) + constant;
        df_alpha = 1.0 / alpha - trigamma(alpha);
        if (df_alpha == 0.0 || df_alpha != df_alpha) {
            fprintf(stderr, "MLE estimation of alpha numerically unstable at designated starting value.\n");
            exit(EXIT_FAILURE);
        }
        alpha_prime = alpha - f_alpha / df_alpha;
        if (fabs(alpha - alpha_prime) < MACHEP) {
            return alpha_prime;
        }
        alpha = alpha_prime;
    }
}

// Maximum-likelihood normal-inverse-gamma parameters from paired samples of
// means (mus) and precisions (taus).
void mle_normal_inverse_gamma_params(double* mus, double* taus, int64_t length,
                                     double* mu_0_out, double* nu_out,
                                     double* alpha_out, double* beta_out) {
    double sum_tau = 0.0;
    double sum_log_tau = 0.0;
    for (int64_t i = 0; i < length; i++) {
        sum_tau += taus[i];
        sum_log_tau += log(taus[i]);
    }

    // precision-weighted mean
    double mu_0 = 0.0;
    for (int64_t i = 0; i < length; i++) {
        mu_0 += mus[i] * taus[i];
    }
    mu_0 /= sum_tau;

    double sum_weighted_sq_devs = 0.0;
    double dev;
    for (int64_t i = 0; i < length; i++) {
        dev = mus[i] - mu_0;
        sum_weighted_sq_devs += taus[i] * dev * dev;
    }
    double nu = ((double) length) / sum_weighted_sq_devs;

    double alpha = newton_approx_alpha(length, sum_log_tau, sum_tau);
    double beta = length * alpha / sum_tau;

    *mu_0_out = mu_0;
    *nu_out = nu;
    *alpha_out = alpha;
    *beta_out = beta;
}

// Copy a sonLib stList of int64_t* into a flat int64_t array; caller frees.
int64_t* stList_toIntPtr(stList* list, int64_t* length_out) {
    int64_t length = (int64_t) stList_length(list);
    int64_t* int_arr = (int64_t*) malloc(sizeof(int64_t) * length);
    int64_t* entry;
    for (int64_t i = 0; i < length; i++) {
        entry = (int64_t*) stList_get(list, i);
        int_arr[i] = *entry;
    }
    *length_out = length;
    return int_arr;
}

// Copy a sonLib stList of double* into a flat double array; caller frees.
double* stList_toDoublePtr(stList* list, int64_t* length_out) {
    int64_t length = stList_length(list);
    double* double_arr = (double*) malloc(sizeof(double) * length);
    double* entry;
    for (int64_t i = 0; i < length; i++) {
        entry = (double*) stList_get(list, i);
        double_arr[i] = *entry;
    }
    *length_out = length;
    return double_arr;
}
/* ===== file: 3d25pt.c ===== */
/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

#ifndef min
#define min(x,y)    ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates the two time planes of A plus roc2, runs the
 * 25-point order-4 stencil Nt times per test, and reports the best wall time.
 * Usage: prog Nx Ny Nz Nt (each spatial size is padded by 8 ghost cells).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  // NOTE(review): Nx/Ny/Nz are uninitialized if argc <= 3, and Nt if
  // argc <= 4 — the benchmark assumes all four arguments are supplied.
  if (argc > 3)
  {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // A[0]/A[1] are the two time planes of the Jacobi-style update
  // NOTE(review): the initial 1-element malloc assigned to roc2 is leaked by
  // the reassignment a few lines below.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**));

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // NOTE(review): loops start at index 1, so plane/row/column 0 is left
  // uninitialized even though the stencil below reads index i-4 == 0 when
  // i == 4 — confirm whether index 0 should be seeded as well.
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  // order-4 finite-difference coefficients of the 25-point stencil
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    // the scop region is the target of source-to-source tiling tools
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            // wave-equation style update: alternate between the two planes
            A[(t+1)%2][i][j][k] =
              2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
              + roc2[i][j][k]*(
                  coef0* A[t%2][i  ][j  ][k  ] +
                  coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                         A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                         A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                  coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                         A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                         A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                  coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                         A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                         A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                  coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                         A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                         A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
/* ===== file: myprog.c ===== */
#define MSIZE 200

int n, m, mits;
double tol, relax = 1.0, alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;

/* Fill u with zeros and f with the Helmholtz right-hand side evaluated on an
 * (n x m) grid over [-1, 1] x [-1, 1].  Reads globals n, m, alpha; writes
 * globals dx, dy, u, f.
 * NOTE(review): the grid coordinates are truncated to int, so xx/yy only take
 * integer values — confirm against the rest of the program whether this is
 * the intended (legacy benchmark) behavior.
 */
void
initialize ()
{
  int i, j, xx, yy;

  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);

  /* Initialize initial condition and RHS */
  //#pragma omp parallel for private(i,j,xx,yy)
  for (i = 0; i < n; i++)
    {
      for (j = 0; j < m; j++)
        {
          xx = (int) (-1.0 + dx * (i - 1));     /* -1 < x < 1 */
          yy = (int) (-1.0 + dy * (j - 1));     /* -1 < y < 1 */
          double gx = 1.0 - xx * xx;
          double gy = 1.0 - yy * yy;
          u[i][j] = 0.0;
          f[i][j] = -1.0 * alpha * gx * gy - 2.0 * gx - 2.0 * gy;
        }
    }
}
/* ===== file: vadd.orio.c ===== */
/*
 * vadd.orio.c -- Orio auto-tuned three-vector addition benchmark:
 *
 *     y[i] = x1[i] + x2[i] + x3[i],   0 <= i < N
 *
 * This file is machine-generated by the Orio empirical tuner: each branch
 * below is the same vector add, specialized by problem-size range with a
 * different unroll factor (and, in the ORIO_PAR configuration, OpenMP
 * parallelization).  Compile-time configuration macros:
 *   N        -- problem size (required)
 *   REPS     -- number of timed repetitions (required)
 *   TEST     -- if defined, run one rep and print y instead of timings
 *   ORIO_SEQ / ORIO_PAR -- selects the sequential or parallel variant set;
 *              exactly one must be defined, otherwise the generated code
 *              is not meaningful (see the #else arm near the end).
 * The __alignx() calls and #pragma disjoint are IBM XL C compiler hints;
 * they affect optimization only, not results.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>

/* Wall-clock time in seconds, gettimeofday resolution. */
double getClock()
{
  struct timezone tzp;
  struct timeval tp;
  gettimeofday (&tp, &tzp);
  return (tp.tv_sec + tp.tv_usec*1.0e-6);
}

int main(int argc, char *argv[])
{
  double *y;
  double *x1;
  double *x2;
  double *x3;
/* XL-specific no-alias assertion for the four arrays. */
#pragma disjoint (*x1,*x2,*x3,*y)
  int n = N;
  {
    /* Allocate and fill the operands with small deterministic values. */
    int i1;
    y = (double*) malloc((n) * sizeof(double));
    x1 = (double*) malloc((n) * sizeof(double));
    x2 = (double*) malloc((n) * sizeof(double));
    x3 = (double*) malloc((n) * sizeof(double));
    for (i1=0; i1<n; i1++)
    {
      x1[i1] = (i1+1) % 4 + 1;
      x2[i1] = (i1+5) % 10 + 1;
      x3[i1] = (i1+3) % 6 + 1;
      y[i1] = 0;
    }
  }

  double orio_t_start, orio_t_end, orio_t_total=0;
  int orio_i;
  int reps = REPS;
#ifdef TEST
  reps = 1;
#endif

  orio_t_start = getClock();
  for (orio_i=0; orio_i<reps; orio_i++)
  {
    int i;
    if (n <= 65)
    { // parallelize=False, ufactor=3
      /* NOTE(review): casting a pointer to int truncates on LP64 targets;
       * only the low 4 bits are tested here so the 16-byte alignment check
       * still works, but (uintptr_t) would be the portable spelling. */
      if ((((int)(x1)|(int)(x2)|(int)(x3)|(int)(y)) & 0xF) == 0)
      {
        /* All four arrays 16-byte aligned: promise that to the compiler. */
        __alignx(16,x1);
        __alignx(16,x2);
        __alignx(16,x3);
        __alignx(16,y);
        for (i=0; i<=n-3; i=i+3)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
        }
        /* Remainder elements not covered by the unrolled loop. */
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
      else
      {
        for (i=0; i<=n-3; i=i+3)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
#ifdef ORIO_SEQ
    }
    else if (n <= 810)
    { // parallelize=False, ufactor=10
      if ((((int)(x1)|(int)(x2)|(int)(x3)|(int)(y)) & 0xF) == 0)
      {
        __alignx(16,x1);
        __alignx(16,x2);
        __alignx(16,x3);
        __alignx(16,y);
        for (i=0; i<=n-10; i=i+10)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
      else
      {
        for (i=0; i<=n-10; i=i+10)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
    }
    else if (n <= 131250)
    { // parallelize=False, ufactor=20
      if ((((int)(x1)|(int)(x2)|(int)(x3)|(int)(y)) & 0xF) == 0)
      {
        __alignx(16,x1);
        __alignx(16,x2);
        __alignx(16,x3);
        __alignx(16,y);
        for (i=0; i<=n-20; i=i+20)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
          y[(i+12)]=x1[(i+12)]+x2[(i+12)]+x3[(i+12)];
          y[(i+13)]=x1[(i+13)]+x2[(i+13)]+x3[(i+13)];
          y[(i+14)]=x1[(i+14)]+x2[(i+14)]+x3[(i+14)];
          y[(i+15)]=x1[(i+15)]+x2[(i+15)]+x3[(i+15)];
          y[(i+16)]=x1[(i+16)]+x2[(i+16)]+x3[(i+16)];
          y[(i+17)]=x1[(i+17)]+x2[(i+17)]+x3[(i+17)];
          y[(i+18)]=x1[(i+18)]+x2[(i+18)]+x3[(i+18)];
          y[(i+19)]=x1[(i+19)]+x2[(i+19)]+x3[(i+19)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
      else
      {
        for (i=0; i<=n-20; i=i+20)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
          y[(i+12)]=x1[(i+12)]+x2[(i+12)]+x3[(i+12)];
          y[(i+13)]=x1[(i+13)]+x2[(i+13)]+x3[(i+13)];
          y[(i+14)]=x1[(i+14)]+x2[(i+14)]+x3[(i+14)];
          y[(i+15)]=x1[(i+15)]+x2[(i+15)]+x3[(i+15)];
          y[(i+16)]=x1[(i+16)]+x2[(i+16)]+x3[(i+16)];
          y[(i+17)]=x1[(i+17)]+x2[(i+17)]+x3[(i+17)];
          y[(i+18)]=x1[(i+18)]+x2[(i+18)]+x3[(i+18)];
          y[(i+19)]=x1[(i+19)]+x2[(i+19)]+x3[(i+19)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
    }
    else
    { // parallelize=False, ufactor=16
      if ((((int)(x1)|(int)(x2)|(int)(x3)|(int)(y)) & 0xF) == 0)
      {
        __alignx(16,x1);
        __alignx(16,x2);
        __alignx(16,x3);
        __alignx(16,y);
        for (i=0; i<=n-16; i=i+16)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
          y[(i+12)]=x1[(i+12)]+x2[(i+12)]+x3[(i+12)];
          y[(i+13)]=x1[(i+13)]+x2[(i+13)]+x3[(i+13)];
          y[(i+14)]=x1[(i+14)]+x2[(i+14)]+x3[(i+14)];
          y[(i+15)]=x1[(i+15)]+x2[(i+15)]+x3[(i+15)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
      else
      {
        for (i=0; i<=n-16; i=i+16)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
          y[(i+12)]=x1[(i+12)]+x2[(i+12)]+x3[(i+12)];
          y[(i+13)]=x1[(i+13)]+x2[(i+13)]+x3[(i+13)];
          y[(i+14)]=x1[(i+14)]+x2[(i+14)]+x3[(i+14)];
          y[(i+15)]=x1[(i+15)]+x2[(i+15)]+x3[(i+15)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
    }
#elif ORIO_PAR
    }
    else if (n <= 835)
    { // parallelize=False, ufactor=10
      if ((((int)(x1)|(int)(x2)|(int)(x3)|(int)(y)) & 0xF) == 0)
      {
        __alignx(16,x1);
        __alignx(16,x2);
        __alignx(16,x3);
        __alignx(16,y);
        for (i=0; i<=n-10; i=i+10)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
      else
      {
        for (i=0; i<=n-10; i=i+10)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
        }
        for (; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
    }
    else if (n <= 132500)
    { // parallelize=True, ufactor=20
      if ((((int)(x1)|(int)(x2)|(int)(x3)|(int)(y)) & 0xF) == 0)
      {
        __alignx(16,x1);
        __alignx(16,x2);
        __alignx(16,x3);
        __alignx(16,y);
#pragma omp parallel for
        for (i=0; i<=n-20; i=i+20)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
          y[(i+12)]=x1[(i+12)]+x2[(i+12)]+x3[(i+12)];
          y[(i+13)]=x1[(i+13)]+x2[(i+13)]+x3[(i+13)];
          y[(i+14)]=x1[(i+14)]+x2[(i+14)]+x3[(i+14)];
          y[(i+15)]=x1[(i+15)]+x2[(i+15)]+x3[(i+15)];
          y[(i+16)]=x1[(i+16)]+x2[(i+16)]+x3[(i+16)];
          y[(i+17)]=x1[(i+17)]+x2[(i+17)]+x3[(i+17)];
          y[(i+18)]=x1[(i+18)]+x2[(i+18)]+x3[(i+18)];
          y[(i+19)]=x1[(i+19)]+x2[(i+19)]+x3[(i+19)];
        }
        /* Remainder after the parallel unrolled loop; 'i' is private in the
         * omp loop, so the start index is recomputed here.  It may re-write
         * a few already-computed elements, which is harmless (idempotent). */
        for (i=n-((n-1)%20)-1; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
      else
      {
#pragma omp parallel for
        for (i=0; i<=n-20; i=i+20)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
          y[(i+12)]=x1[(i+12)]+x2[(i+12)]+x3[(i+12)];
          y[(i+13)]=x1[(i+13)]+x2[(i+13)]+x3[(i+13)];
          y[(i+14)]=x1[(i+14)]+x2[(i+14)]+x3[(i+14)];
          y[(i+15)]=x1[(i+15)]+x2[(i+15)]+x3[(i+15)];
          y[(i+16)]=x1[(i+16)]+x2[(i+16)]+x3[(i+16)];
          y[(i+17)]=x1[(i+17)]+x2[(i+17)]+x3[(i+17)];
          y[(i+18)]=x1[(i+18)]+x2[(i+18)]+x3[(i+18)];
          y[(i+19)]=x1[(i+19)]+x2[(i+19)]+x3[(i+19)];
        }
        for (i=n-((n-1)%20)-1; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
    }
    else
    { // parallelize=True, ufactor=12
      if ((((int)(x1)|(int)(x2)|(int)(x3)|(int)(y)) & 0xF) == 0)
      {
        __alignx(16,x1);
        __alignx(16,x2);
        __alignx(16,x3);
        __alignx(16,y);
#pragma omp parallel for
        for (i=0; i<=n-12; i=i+12)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
        }
        for (i=n-((n-1)%12)-1; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
      else
      {
#pragma omp parallel for
        for (i=0; i<=n-12; i=i+12)
        {
          y[i]=x1[i]+x2[i]+x3[i];
          y[(i+1)]=x1[(i+1)]+x2[(i+1)]+x3[(i+1)];
          y[(i+2)]=x1[(i+2)]+x2[(i+2)]+x3[(i+2)];
          y[(i+3)]=x1[(i+3)]+x2[(i+3)]+x3[(i+3)];
          y[(i+4)]=x1[(i+4)]+x2[(i+4)]+x3[(i+4)];
          y[(i+5)]=x1[(i+5)]+x2[(i+5)]+x3[(i+5)];
          y[(i+6)]=x1[(i+6)]+x2[(i+6)]+x3[(i+6)];
          y[(i+7)]=x1[(i+7)]+x2[(i+7)]+x3[(i+7)];
          y[(i+8)]=x1[(i+8)]+x2[(i+8)]+x3[(i+8)];
          y[(i+9)]=x1[(i+9)]+x2[(i+9)]+x3[(i+9)];
          y[(i+10)]=x1[(i+10)]+x2[(i+10)]+x3[(i+10)];
          y[(i+11)]=x1[(i+11)]+x2[(i+11)]+x3[(i+11)];
        }
        for (i=n-((n-1)%12)-1; i<=n-1; i=i+1)
          y[i]=x1[i]+x2[i]+x3[i];
      }
    }
#else
    /* Neither ORIO_SEQ nor ORIO_PAR defined: invalid build configuration. */
    printf("error\n");
    exit(1);
#endif
  }
  orio_t_end = getClock();
  orio_t_total = orio_t_end - orio_t_start;
  orio_t_total = orio_t_total / REPS;   /* average time per repetition */
  double mflops = (8.0*N)/(orio_t_total*1000000);
#ifdef TEST
  {
    /* Verification mode: dump y, ten values per line. */
    int i;
    for (i=0; i<=n-1; i++)
    {
      if (i%10 == 0)
        printf("\n");
      printf("%f ",y[i]);
    }
  }
#else
  printf("%f\t%f\n", orio_t_total, mflops);
#endif
  return y[0];
}
GB_binop__bget_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit instantiates the BGET (bit-get) operator for uint8_t:
// each function body is a shared algorithm template pulled in via #include,
// specialized through the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bget_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__bget_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bget_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__bget_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bget_uint8)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bget_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bget_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bget_uint8)
// C=scalar+B                       GB (_bind1st__bget_uint8)
// C=scalar+B'                      GB (_bind1st_tran__bget_uint8)
// C=A+scalar                       GB (_bind2nd__bget_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__bget_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = GB_BITGET (aij, bij, uint8_t, 8)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITGET (x, y, uint8_t, 8) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_UINT8 || GxB_NO_BGET_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bget_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bget_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bget_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the return above already exits; this
    // duplicate is emitted by the generator and is harmless.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bget_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used for eWiseUnion (values for entries present
    // in just one of A or B)
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bget_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bget_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bget_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; entry p absent if GBB is 0
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, uint8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bget_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; entry p absent if GBB is 0
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, uint8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITGET (x, aij, uint8_t, 8) ;  \
}

GrB_Info GB (_bind1st_tran__bget_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITGET (aij, y, uint8_t, 8) ;  \
}

GrB_Info GB (_bind2nd_tran__bget_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
parallel_levelset_distance_calculator.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED ) #define KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "includes/define.h" #include "utilities/geometry_utilities.h" #include "includes/deprecated_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. */ template< unsigned int TDim> class ParallelDistanceCalculator { public: ///@name Type Definitions ///@{ KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE); /// Pointer definition of ParallelDistanceCalculator KRATOS_CLASS_POINTER_DEFINITION(ParallelDistanceCalculator); ///@} ///@name Life Cycle ///@{ /// Default constructor. ParallelDistanceCalculator() {}; /// Destructor. 
/// Destructor (virtual: the class is intended to be used polymorphically).
virtual ~ParallelDistanceCalculator() {};

///Function to calculate a signed distance function suitable for calculations using the Level Set Method
///the function assumes given a "signed distance" distributions and recomputes the distances
///respecting as accurately as possible the position of the zero of the original distributions
///@param rModelPart is the ModelPart on which we will operate
///@param rDistanceVar is the Variable that we will use in calculating the distance
///@param rAreaVar is the Variable that we will use for L2 projections
///@param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances
///@param max_distance distances will not be computed after reaching this limit
///@param Options flags; by default exact distances to the zero-level-set plane are NOT computed
void CalculateDistances(ModelPart& rModelPart,
                        const Variable<double>& rDistanceVar,
                        const Variable<double>& rAreaVar,
                        const unsigned int max_levels,
                        const double max_distance,
                        Flags Options = NOT_CALCULATE_EXACT_DISTANCES_TO_PLANE)
{
    KRATOS_TRY

    // Verify that the required variables exist in the model part.
    Check(rModelPart, rDistanceVar, rAreaVar);

    // Zero/initialize the distance and area variables up to max_distance.
    ResetVariables(rModelPart,rDistanceVar, max_distance);

    // Recompute exact distances on the elements cut by the zero level set,
    // preserving the position of the original zero iso-surface.
    CalculateExactDistancesOnDividedElements(rModelPart, rDistanceVar, rAreaVar, max_distance, Options);

    // Propagate distances outward layer by layer, up to max_levels layers.
    ExtendDistancesByLayer(rModelPart, rDistanceVar, rAreaVar, max_levels, max_distance);

    // Restore the sign of the original signed-distance field.
    AssignDistanceSign(rModelPart, rDistanceVar, rAreaVar, max_distance);

    KRATOS_CATCH("")
}

///Function to calculate a signed distance function suitable for calculations using the Level Set Method
///The difference of this function with previous one is the fact that it wont recalculate the exact distance
///in divided elements in order to preserve the current distance.
///the function assumes given a "signed distance" distributions and recomputes the distances
///respecting as accurately as possible the position of the zero of the original distributions
///@param rModelPart is the ModelPart on which we will operate
///@param rDistanceVar is the Variable that we will use in calculating the distance
///@param rAreaVar is the Variable that we will use for L2 projections
///@param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances
///@param max_distance distances will not be computed after reaching this limit
void CalculateInterfacePreservingDistances(ModelPart& rModelPart,
        const Variable<double>& rDistanceVar,
        const Variable<double>& rAreaVar,
        const unsigned int max_levels,
        const double max_distance)
{
    KRATOS_TRY

    // Same pipeline as CalculateDistances, except divided elements only get
    // their distances made positive (AbsDistancesOnDividedElements) rather
    // than recomputed, so the interface position is left untouched.
    Check(rModelPart, rDistanceVar, rAreaVar);

    ResetVariables(rModelPart,rDistanceVar, max_distance);

    AbsDistancesOnDividedElements(rModelPart, rDistanceVar, rAreaVar, max_distance);

    ExtendDistancesByLayer(rModelPart, rDistanceVar, rAreaVar, max_levels, max_distance);

    AssignDistanceSign(rModelPart, rDistanceVar, rAreaVar, max_distance);

    KRATOS_CATCH("")
}

/// A simplified version of CalculateDistances to be used when the rDistanceVar == 0 surface is described by a set of nodes
/**
 * Nodes with IS_VISITED == 1 are taken as the (fixed) zero surface seed;
 * distances are then propagated layer by layer as in CalculateDistances,
 * but no sign is assigned at the end (results stay non-negative, capped
 * at max_distance).
 * @param rModelPart is the ModelPart on which we will operate
 * @param rDistanceVar is the Variable that we will use in calculating the distance
 * @param rAreaVar is the Variable that we will use for L2 projections
 * @param max_levels is the number of maximum "layers" of element that will be used in the calculation of the distances
 * @param max_distance distances will not be computed after reaching this limit
 * @see ParallelDistanceCalculator::CalculateDistances
 */
void CalculateDistancesLagrangianSurface(ModelPart& rModelPart,
        const Variable<double>& rDistanceVar,
        const Variable<double>& rAreaVar,
        const unsigned int max_levels,
        const double max_distance)
{
    KRATOS_TRY

    bool is_distributed = false;
    if(rModelPart.GetCommunicator().TotalProcesses() > 1)
        is_distributed = true;

    //check that variables needed are in the model part
    if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rDistanceVar)) )
        KRATOS_THROW_ERROR(std::logic_error,"distance Variable is not in the model part","");
    if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rAreaVar)) )
        KRATOS_THROW_ERROR(std::logic_error,"Area Variable is not in the model part","");
    if(is_distributed == true)
        if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(PARTITION_INDEX)) )
            KRATOS_THROW_ERROR(std::logic_error,"PARTITION_INDEX Variable is not in the model part","");

    array_1d<double,TDim+1> visited;
    const int elem_size = rModelPart.Elements().size();
    const int node_size = rModelPart.Nodes().size();

    // set to zero the distance on non-seed nodes; seed nodes (IS_VISITED==1)
    // keep their value and get a unit "area" weight
    #pragma omp parallel for
    for(int i = 0; i<node_size; i++)
    {
        ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
        double& area = it->FastGetSolutionStepValue(rAreaVar);
        area = 0.0;
        double& is_visited = it->GetValue(IS_VISITED);
        double& distance = it->FastGetSolutionStepValue(rDistanceVar);
        // back up the current distance into the non-historical database
        it->GetValue(rDistanceVar) = it->FastGetSolutionStepValue(rDistanceVar);
        if(is_visited != 1.0)
        {
            distance = 0.0;
        }
        else
            area = 1.0;

        // else if(dist < 0.0)
        // KRATOS_THROW_ERROR(std::logic_error,"ATTENTION: prescribed distance function set to a number smaller than 0!!","");
    }

    array_1d<double,TDim+1> N;
    BoundedMatrix <double, TDim+1,TDim> DN_DX;

    // Extend the distances layer by layer up to a maximum level of layers
    for(unsigned int level=0; level<max_levels; level++)
    {
        //loop on active elements and advance the distance computation
        // (an element is "active" when exactly TDim of its nodes are known)
        #pragma omp parallel for private(DN_DX,visited)
        for(int i = 0; i<elem_size; i++)
        {
            PointerVector< Element>::iterator it=rModelPart.ElementsBegin()+i;
            Geometry<Node<3> >&geom = it->GetGeometry();

            for(unsigned int j=0; j<TDim+1; j++)
                visited[j] = (static_cast<const Node<3> & >(geom[j])).GetValue(IS_VISITED);

            if(IsActive(visited))
            {
                double Volume;
                GeometryUtils::CalculateGeometryData(geom,DN_DX,N,Volume);

                // accumulates volume-weighted distance into the unknown node
                AddDistanceToNodes(rDistanceVar,rAreaVar,geom,DN_DX,Volume);
            }
        }

        //mpi sync variables: stash already-known distances, assemble the
        //partial sums across partitions, then restore the known values
        if(is_distributed == true)
        {
            #pragma omp parallel for private(DN_DX)
            for(int i = 0; i<node_size; i++)
            {
                ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;

                if(it->GetValue(IS_VISITED) == 1.0)
                {
                    double& distance = it->FastGetSolutionStepValue(rDistanceVar);
                    it->GetValue(rDistanceVar) = distance;
                    distance = 0.0;
                }
                else
                    it->GetValue(rDistanceVar) = 0.0;
            }

            rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar);
            rModelPart.GetCommunicator().AssembleCurrentData(rDistanceVar);

            #pragma omp parallel for private(DN_DX)
            for(int i = 0; i<node_size; i++)
            {
                ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
                it->FastGetSolutionStepValue(rDistanceVar) += it->GetValue(rDistanceVar);
            }

            rModelPart.GetCommunicator().GetDataCommunicator().Barrier();
        }

        //finalize the computation of the distance: divide the accumulated
        //sum by the accumulated weight and mark the node as known
        #pragma omp parallel for private(DN_DX)
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            double& area = it->FastGetSolutionStepValue(rAreaVar);
            double& is_visited = it->GetValue(IS_VISITED);
            if(area > 1e-20 && is_visited != 1.0) //this implies that node was computed at the current level and not before
            {
                double& distance = it->FastGetSolutionStepValue(rDistanceVar);
                distance /= area;
                is_visited = 1.0;
            }
        }
    }

    //*****************************************************************+
    //*****************************************************************+
    //*****************************************************************+
    //assign the sign to the distance function according to the original distribution. Set to max for nodes that were not calculated
    #pragma omp parallel for
    for(int i = 0; i<node_size; i++)
    {
        ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
        const double area = it->FastGetSolutionStepValue(rAreaVar);
        double& dist = it->FastGetSolutionStepValue(rDistanceVar);
        if(dist > max_distance || area <1e-20)
            dist = max_distance;

        // if(it->GetValue(IS_FLUID) == 1.0)
        // dist = -fabs(dist);
        // else
        // dist = fabs(dist);
    }

    KRATOS_CATCH("")
}

//**********************************************************************************
//**********************************************************************************
/// Returns the maximum edge length over all elements, synchronized (MaxAll)
/// across MPI partitions. Useful for choosing max_distance.
double FindMaximumEdgeSize(ModelPart& r_model_part)
{
    KRATOS_TRY

    double h_max = 0.0;

    for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); it!=r_model_part.ElementsEnd(); it++)
    {
        Geometry<Node<3> >&geom = it->GetGeometry();

        double h = 0.0;

        // squared length of the longest edge of this element
        for(unsigned int i=0; i<TDim+1; i++)
        {
            double xc = geom[i].X();
            double yc = geom[i].Y();
            double zc = geom[i].Z();
            for(unsigned int j=i+1; j<TDim+1; j++)
            {
                double x = geom[j].X();
                double y = geom[j].Y();
                double z = geom[j].Z();
                double l = (x - xc)*(x - xc);
                l += (y - yc)*(y - yc);
                l += (z - zc)*(z - zc);

                if (l > h) h = l;
            }
        }

        h = sqrt(h);

        if(h > h_max) h_max = h;
    }

    h_max = r_model_part.GetCommunicator().GetDataCommunicator().MaxAll(h_max);

    return h_max;

    KRATOS_CATCH("");
}

///@}
///@name Operators
///@{

///@}
///@name Operations
///@{

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
virtual std::string Info() const
{
    std::stringstream buffer;
    buffer << "ParallelDistanceCalculator" << TDim << "D";
    return buffer.str();
};

/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
    rOStream << "ParallelDistanceCalculator" << TDim << "D";
};

/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {};

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

//*******************************************************************
/// True when the element is cut by the zero level (its nodal distances
/// have mixed signs; zero counts as positive).
bool IsDivided(array_1d<double,TDim+1>& dist)
{
    unsigned int positive = 0;
    unsigned int negative = 0;
    for(unsigned int i=0; i<TDim+1; i++)
    {
        if(dist[i] >= 0)
            positive++;
        else
            negative++;
    }

    bool is_divided = false;
    if(positive > 0 && negative>0)
        is_divided = true;

    return is_divided;
}

//*******************************************************************
/// True when exactly TDim of the element's nodes are already known
/// (IS_VISITED set), i.e. exactly one node remains to be computed.
bool IsActive(array_1d<double,TDim+1>& visited)
{
    unsigned int positive = 0;
    for(unsigned int i=0; i<TDim+1; i++)
        if(visited[i] > 0.9999999999) //node was considered
            positive++;

    bool is_active = false;
    if(positive == TDim)
        is_active = true;

    return is_active;
}

//*******************************************************************
/// Computes, for every node of a divided element, its unsigned distance
/// to the (planar) free surface reconstructed from the nodal distances.
void ComputeExactDistances(const BoundedMatrix <double, TDim+1,TDim>& DN_DX,
                           const double& Area,
                           Geometry<Node<3> >& geom,
                           const array_1d<double,TDim+1>& distances,
                           array_1d<double,TDim+1>& exact_dist
                          )
{
    array_1d<double,TDim> grad_d;
    array_1d<double,3> coord_on_0 = ZeroVector(3);
    array_1d<double,3> temp;

    //compute the gradient of the distance and normalize it
    noalias(grad_d) = prod(trans(DN_DX),distances);
    double norm = norm_2(grad_d);
    grad_d /= norm; // NOTE(review): no guard against norm == 0 — assumes a non-degenerate distance field

    //find one division point on one edge
    for(unsigned int i = 1; i<TDim+1; i++)
    {
        if(distances[0]*distances[i]<=0.0) //if the edge is divided
        {
            double delta_d = fabs(distances[i]) + fabs(distances[0]);

            if(delta_d>1e-20)
            {
                // linear interpolation along the edge 0-i to the zero crossing
                double Ni = fabs(distances[0]) / delta_d;
                double N0 = fabs(distances[i]) / delta_d;

                noalias(coord_on_0) = N0 * geom[0].Coordinates();
                noalias(coord_on_0) += Ni * geom[i].Coordinates();
            }
            else
                noalias(coord_on_0) = geom[0].Coordinates();

            break;
        }
    }

    //now calculate the distance of all the nodes from the elemental free surface
    for(unsigned int i = 0; i<TDim+1; i++)
    {
        noalias(temp) = geom[i].Coordinates();
        noalias(temp) -= coord_on_0 ;

        // projection of (node - surface point) onto the unit distance gradient
        double real_distance = 0.0;
        for(unsigned int k=0; k<TDim; k++)
            real_distance += temp[k]*grad_d[k];
        real_distance = fabs(real_distance);

        exact_dist[i] = real_distance;
    }
}

//*******************************************************************
/// Alternative distance propagation: reconstructs the plane through the
/// TDim known nodes and projects the unknown node onto its normal.
/// Thread-safe via the per-node lock around the accumulation.
void AddDistanceToNodesNew(const Variable<double>& rDistanceVar,
                           const Variable<double>& rAreaVar,
                           Geometry<Node<3> >& geom,
                           const BoundedMatrix <double, TDim+1,TDim>& DN_DX,
                           const double& Volume
                          )
{
    unsigned int unknown_node_index = 0;
    array_1d<double,TDim> d;
    double nodal_vol = Volume/static_cast<double>(TDim+1);
    double avg_dist = 0.0;
    Matrix coord_a(3,3);
    int row = 0;
    // NOTE(review): reference_node_index is only assigned when a known node is
    // found — callers must guarantee the element is "active" (TDim known nodes),
    // otherwise it is read uninitialized below. TODO confirm.
    int reference_node_index;

    //compute discriminant and find the index of the unknown node
    noalias(d) = ZeroVector(TDim);
    for (unsigned int iii = 0; iii < TDim + 1; iii++)
    {
        double node_is_known = geom[iii].GetValue(IS_VISITED);
        if (node_is_known == 1) //identyfing the known node
        {
            reference_node_index = iii;
            for(int i_coord = 0 ; i_coord < 3 ; i_coord++)
                coord_a(row,i_coord) = geom[iii].Coordinates()[i_coord];
            d[row] = geom[iii].FastGetSolutionStepValue(rDistanceVar);
            avg_dist += d[row];
            row++;
        }
        else
            unknown_node_index = iii;
    }
    avg_dist /= static_cast<double>(TDim);

    Matrix inverse_a(3,3);
    double det_a;
    MathUtils<double>::InvertMatrix3(coord_a,inverse_a,det_a);

    array_1d<double,TDim> x; // normal to the surface
    noalias(x) = prod(inverse_a,d);
    double norm_x = norm_2(x);
    x /= norm_x;

    array_1d<double,TDim> v = geom[unknown_node_index].Coordinates() - geom[reference_node_index].Coordinates();

    double distance = inner_prod(x,v);
    distance += geom[reference_node_index].FastGetSolutionStepValue(rDistanceVar);

    //KRATOS_WATCH(coord_a)
    //KRATOS_WATCH(distance)

    // accumulate volume-weighted contribution under the node lock
    geom[unknown_node_index].SetLock();
    geom[unknown_node_index].FastGetSolutionStepValue(rDistanceVar) += distance*nodal_vol;
    geom[unknown_node_index].FastGetSolutionStepValue(rAreaVar) += nodal_vol;
    geom[unknown_node_index].UnSetLock();

    //GeometryUtils::CalculateTetrahedraDistances(element_geometry, dist);
}

//*******************************************************************
/// Computes the distance of the single unknown node of an "active" element
/// by enforcing |grad(d)| = 1 (eikonal condition), which reduces to the
/// quadratic a*x^2 + b*x + c = 0 in the unknown nodal distance x.
/// Accumulates the volume-weighted result into the node under its lock.
void AddDistanceToNodes(const Variable<double>& rDistanceVar,
                        const Variable<double>& rAreaVar,
                        Geometry<Node<3> >& geom,
                        const BoundedMatrix <double, TDim+1,TDim>& DN_DX,
                        const double& Volume
                       )
{
    unsigned int unknown_node_index = 0;
    array_1d<double,TDim> d;
    double nodal_vol = Volume/static_cast<double>(TDim+1);
    double avg_dist = 0.0;

    //compute discriminant and find the index of the unknown node
    noalias(d) = ZeroVector(TDim);
    for (unsigned int iii = 0; iii < TDim + 1; iii++)
    {
        double node_is_known = geom[iii].GetValue(IS_VISITED);
        if (node_is_known == 1) //identyfing the unknown node
        {
            const double distance = geom[iii].FastGetSolutionStepValue(rDistanceVar);
            avg_dist += distance;
            for (unsigned int jjj = 0; jjj < TDim; jjj++)
                d[jjj] += DN_DX(iii, jjj) * distance;
        }
        else
            unknown_node_index = iii;
    }
    avg_dist /= static_cast<double>(TDim);

    //finalizing computation of discriminant
    double c = -1.0;
    double a = 0.0;
    double b = 0.0;
    for (unsigned int jjj = 0; jjj < TDim; jjj++)
    {
        a += DN_DX(unknown_node_index, jjj) * DN_DX(unknown_node_index, jjj);
        b += d[jjj] * DN_DX(unknown_node_index, jjj);
        c += d[jjj] * d[jjj];
    }
    b *= 2.0;

    //here we require (a*x^2 + b*x + c)^2 to be minimum (x represents the unknown distance)
    //this implies setting to zero
    //(a*x^2 + b*x + c)*(2ax+b) = 0
    double distance;

    double discriminant = b * b - 4.0 * a*c;

    if (discriminant < 0.0) //here we solve (2ax+b) = 0
    {
        // double numerator = 0.0;
        // double denominator = 0.0;
        // for(unsigned int i=0; i<TDim+1; i++)
        // {
        // for (unsigned int jjj = 0; jjj < TDim; jjj++)
        // {
        // if(i != unknown_node_index)
        // numerator += DN_DX(unknown_node_index, jjj) * DN_DX(i, jjj);
        // else
        // denominator += DN_DX(unknown_node_index, jjj)*DN_DX(unknown_node_index, jjj);
        // }
        // }
        // distance = - numerator/denominator;
        //
        // KRATOS_WATCH(geom[unknown_node_index].Id());
        // KRATOS_WATCH(discriminant);
        distance = -b / (2.0*a); //avg_dist ; //
    }
    else //in this case we solve (a*x^2 + b*x + c)=0
    {
        //(accurate) computation of the distance
        //requires the solution of a*x^2+b*x+c=0
        // Numerically stable quadratic formula: compute q with the sign of b
        // to avoid cancellation, then take the larger root.
        double q, root1, root2;
        double sqrt_det = sqrt(discriminant);
        if (a != 0.0)
        {
            if (b > 0) q = -0.5 * (b + sqrt_det);
            else q = -0.5 * (b - sqrt_det);
            root1 = q / a;
            root2 = c / q;
            if (root1 > root2) distance = root1;
            else distance = root2;
        }
        else //in this case we have a linear equation
        {
            distance = -c / b;
        }
    }

    // distances must stay positive; clamp to a tiny epsilon instead of zero
    if(distance < 0.0)
        distance = 1e-15;

    geom[unknown_node_index].SetLock();
    geom[unknown_node_index].FastGetSolutionStepValue(rDistanceVar) += distance*nodal_vol;
    geom[unknown_node_index].FastGetSolutionStepValue(rAreaVar) += nodal_vol;
    geom[unknown_node_index].UnSetLock();
}

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

/// Verifies the required nodal variables (distance, area and, when running
/// distributed, PARTITION_INDEX) exist in the model part; throws otherwise.
void Check(ModelPart& rModelPart, const Variable<double>& rDistanceVar, const Variable<double>& rAreaVar)
{
    KRATOS_TRY

    bool is_distributed = false;
    if(rModelPart.GetCommunicator().TotalProcesses() > 1)
        is_distributed = true;

    //check that variables needed are in the model part
    if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rDistanceVar)) )
        KRATOS_THROW_ERROR(std::logic_error,"distance Variable is not in the model part","");

    if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(rAreaVar)) )
        KRATOS_THROW_ERROR(std::logic_error,"Area Variable is not in the model part","");

    if(is_distributed == true)
        if(!(rModelPart.NodesBegin()->SolutionStepsDataHas(PARTITION_INDEX)) )
            KRATOS_THROW_ERROR(std::logic_error,"PARTITION_INDEX Variable is not in the model part","")

    KRATOS_CATCH("")
}

/// Backs up the signed distance into the non-historical database, records the
/// fluid side in IS_FLUID, resets distances to MaxDistance and clears IS_VISITED.
void ResetVariables(ModelPart& rModelPart,
                    const Variable<double>& rDistanceVar,
                    const double MaxDistance)
{
    KRATOS_TRY

    //reset the variables needed
    const int node_size = rModelPart.Nodes().size();
    #pragma omp parallel for
    for(int i = 0; i<node_size; i++)
    {
        ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
        //it->FastGetSolutionStepValue(rAreaVar) = 0.0;

        double& dist = it->FastGetSolutionStepValue(rDistanceVar);
        it->SetValue(rDistanceVar,dist); //here we copy the distance function to the fixed database

        if(dist < 0.0)
            it->SetValue(IS_FLUID,1.0);
        else
            it->SetValue(IS_FLUID,0.0);

        dist = MaxDistance;

        it->SetValue(IS_VISITED,0);
    }

    KRATOS_CATCH("")
}

/// Seeds the computation: finds elements divided by the original distance
/// distribution, recomputes "exact" unsigned distances on their nodes
/// (plane-exact or tetrahedra approximation depending on Options) and marks
/// those nodes IS_VISITED. Synchronizes results across MPI partitions.
void CalculateExactDistancesOnDividedElements(ModelPart& rModelPart,
        const Variable<double>& rDistanceVar,
        const Variable<double>& rAreaVar,
        const double MaxDistance,
        Flags Options)
{
    KRATOS_TRY

    //identify the list of elements divided by the original distance distribution and recompute an "exact" distance
    //attempting to mantain the original position of the free surface
    //note that the backup value is used in calculating the position of the free surface and the divided elements
    array_1d<double,TDim+1> dist, exact_dist;
    array_1d<double,TDim+1> visited;
    // double lumping_factor = 1.0/double(TDim+1);
    int elem_size = rModelPart.Elements().size();

    #pragma omp parallel for private(dist,exact_dist) firstprivate(elem_size)
    for (int i = 0; i < elem_size; i++)
    {
        PointerVector< Element>::iterator it = rModelPart.ElementsBegin() + i;
        Geometry<Node < 3 > >& element_geometry = it->GetGeometry();

        // read the backed-up (original) distances
        for (unsigned int j = 0; j < TDim + 1; j++)
            dist[j] = element_geometry[j].GetValue(rDistanceVar);

        bool is_divided = IsDivided(dist);

        if (is_divided == true)
        {
            if (Options.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE))
                GeometryUtils::CalculateExactDistancesToPlane(element_geometry, dist);
            else
                GeometryUtils::CalculateTetrahedraDistances(element_geometry, dist);

            // loop over nodes and apply the new distances.
            for (unsigned int i_node = 0; i_node < element_geometry.size(); i_node++)
            {
                double& distance = element_geometry[i_node].GetSolutionStepValue(rDistanceVar);
                double new_distance = dist[i_node];

                element_geometry[i_node].SetLock();

                // keep the smallest distance seen for this node
                if (fabs(distance) > fabs(new_distance))
                    distance = new_distance;

                element_geometry[i_node].GetValue(IS_VISITED) = 1;

                element_geometry[i_node].UnSetLock();
            }
        }
    }

    //mpi sync variables
    rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_VISITED);
    rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar);
    rModelPart.GetCommunicator().SynchronizeCurrentDataToMin(rDistanceVar);

    const int node_size = rModelPart.Nodes().size();
    #pragma omp parallel for
    for(int i = 0; i<node_size; i++)
    {
        ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;

        double& nodal_dist = it->FastGetSolutionStepValue(rDistanceVar);
        double& is_visited = it->GetValue(IS_VISITED);

        if(is_visited == 0.00)
        {
            nodal_dist = 0.00;
            it->GetSolutionStepValue(rAreaVar) = 0.00;
        }
        else if(is_visited >= 1.00) // This is due to the fact that I'm using the assemble instead of sync
        {
            is_visited = 1.00;
            it->GetSolutionStepValue(rAreaVar) = 1.00; // This is not correct
        }
    }

    KRATOS_CATCH("")
}

/// Interface-preserving variant of the seeding step: on divided elements the
/// backed-up distances are only made positive (fabs) — no recomputation —
/// so the original zero iso-surface is kept exactly.
void AbsDistancesOnDividedElements(ModelPart& rModelPart,
                                   const Variable<double>& rDistanceVar,
                                   const Variable<double>& rAreaVar,
                                   const double MaxDistance)
{
    KRATOS_TRY

    //identify the list of elements divided by the original distance distribution and recompute an "exact" distance
    //attempting to mantain the original position of the free surface
    //note that the backup value is used in calculating the position of the free surface and the divided elements
    array_1d<double,TDim+1> dist, exact_dist;
    array_1d<double,TDim+1> visited;
    int elem_size = rModelPart.Elements().size();

    #pragma omp parallel for private(dist,exact_dist) firstprivate(elem_size)
    for (int i = 0; i < elem_size; i++)
    {
        PointerVector< Element>::iterator it = rModelPart.ElementsBegin() + i;
        Geometry<Node < 3 > >& element_geometry = it->GetGeometry();

        for (unsigned int j = 0; j < TDim + 1; j++)
            dist[j] = element_geometry[j].GetValue(rDistanceVar);

        bool is_divided = IsDivided(dist);

        if (is_divided == true)
        {
            // loop over nodes and apply the new distances.
            for (unsigned int i_node = 0; i_node < element_geometry.size(); i_node++)
            {
                double& distance = element_geometry[i_node].GetSolutionStepValue(rDistanceVar);
                double new_distance = dist[i_node];

                element_geometry[i_node].SetLock();

                distance = fabs(new_distance);

                element_geometry[i_node].GetValue(IS_VISITED) = 1;

                element_geometry[i_node].UnSetLock();
            }
        }
    }

    //mpi sync variables
    rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_VISITED);
    rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar);
    rModelPart.GetCommunicator().SynchronizeCurrentDataToMin(rDistanceVar);

    const int node_size = rModelPart.Nodes().size();
    #pragma omp parallel for
    for(int i = 0; i<node_size; i++)
    {
        ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;

        double& nodal_dist = it->FastGetSolutionStepValue(rDistanceVar);
        double& is_visited = it->GetValue(IS_VISITED);

        if(is_visited == 0.00)
        {
            nodal_dist = 0.00;
            it->GetSolutionStepValue(rAreaVar) = 0.00;
        }
        else if(is_visited >= 1.00) // This is due to the fact that I'm using the assemble instead of sync
        {
            is_visited = 1.00;
            it->GetSolutionStepValue(rAreaVar) = 1.00; // This is not correct
        }
    }

    KRATOS_CATCH("")
}

/// Propagates the (unsigned) distances outwards from the seeded nodes, one
/// element layer per level, up to max_levels. Each level: solve active
/// elements, MPI-assemble partial sums, then average and mark new nodes.
void ExtendDistancesByLayer(ModelPart& rModelPart,
                            const Variable<double>& rDistanceVar,
                            const Variable<double>& rAreaVar,
                            const unsigned int max_levels,
                            const double MaxDistance)
{
    KRATOS_TRY

    array_1d<double,TDim+1> visited;
    array_1d<double,TDim+1> N;
    BoundedMatrix <double, TDim+1,TDim> DN_DX;
    const int elem_size = rModelPart.Elements().size();
    const int node_size = rModelPart.Nodes().size();

    //*****************************************************************+
    //*****************************************************************+
    //*****************************************************************+
    //now extend the distances layer by layer up to a maximum level of layers
    for(unsigned int level=0; level<max_levels; level++)
    {
        //loop on active elements and advance the distance computation
        #pragma omp parallel for private(DN_DX,visited)
        for(int i = 0; i<elem_size; i++)
        {
            PointerVector< Element>::iterator it=rModelPart.ElementsBegin()+i;
            Geometry<Node<3> >&geom = it->GetGeometry();

            for(unsigned int j=0; j<TDim+1; j++)
                visited[j] = geom[j].GetValue(IS_VISITED);

            if(IsActive(visited))
            {
                double Volume;
                GeometryUtils::CalculateGeometryData(geom,DN_DX,N,Volume);

                AddDistanceToNodes(rDistanceVar,rAreaVar,geom,DN_DX,Volume);
            }
        }

        bool is_distributed = false;
        if(rModelPart.GetCommunicator().TotalProcesses() > 1)
            is_distributed = true;

        //mpi sync variables: protect known distances, assemble the partial
        //sums over partitions, restore, then barrier
        if(is_distributed == true)
        {
            #pragma omp parallel for private(DN_DX)
            for(int i = 0; i<node_size; i++)
            {
                ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;

                if(it->GetValue(IS_VISITED) == 1.0)
                {
                    double& distance = it->FastGetSolutionStepValue(rDistanceVar);
                    it->GetValue(rDistanceVar) = distance;
                    distance = 0.0;
                }
                else
                    it->GetValue(rDistanceVar) = 0.0;
            }

            rModelPart.GetCommunicator().AssembleCurrentData(rAreaVar);
            rModelPart.GetCommunicator().AssembleCurrentData(rDistanceVar);

            #pragma omp parallel for private(DN_DX)
            for(int i = 0; i<node_size; i++)
            {
                ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
                it->FastGetSolutionStepValue(rDistanceVar) += it->GetValue(rDistanceVar);
            }

            rModelPart.GetCommunicator().GetDataCommunicator().Barrier();
        }

        //finalize the computation of the distance: weighted average, then
        //mark the node as known so the next level can build on it
        #pragma omp parallel for private(DN_DX)
        for(int i = 0; i<node_size; i++)
        {
            ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
            double& area = it->FastGetSolutionStepValue(rAreaVar);
            double& is_visited = it->GetValue(IS_VISITED);
            if(area > 1e-20 && is_visited != 1.0) //this implies that node was computed at the current level and not before
            {
                double& distance = it->FastGetSolutionStepValue(rDistanceVar);
                distance /= area;
                is_visited = 1.0;
            }
        }
    }

    KRATOS_CATCH("")
}

/// Restores the sign of the distance field from the IS_FLUID backup
/// (fluid side negative) and caps uncomputed/overshooting nodes at MaxDistance.
void AssignDistanceSign(ModelPart& rModelPart,
                        const Variable<double>& rDistanceVar,
                        const Variable<double>& rAreaVar,
                        const double MaxDistance)
{
    KRATOS_TRY

    //*****************************************************************+
    //*****************************************************************+
    //*****************************************************************+
    //assign the sign to the distance function according to the original distribution. Set to max for nodes that were not calculated
    const int node_size = rModelPart.Nodes().size();
    #pragma omp parallel for
    for(int i = 0; i<node_size; i++)
    {
        ModelPart::NodesContainerType::iterator it=rModelPart.NodesBegin()+i;
        const double area = it->FastGetSolutionStepValue(rAreaVar);
        double& dist = it->FastGetSolutionStepValue(rDistanceVar);

        if(dist < 0.0)
            KRATOS_THROW_ERROR(std::logic_error,"IMPOSSIBLE negative distance found !!","");

        if(dist > MaxDistance || area <1e-20)
            //if(dist > max_distance)
            dist = MaxDistance;

        if(it->GetValue(IS_FLUID) == 1.0)
            dist = -fabs(dist);
        else
            dist = fabs(dist);
    }

    KRATOS_CATCH("")
}

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator.
// NOTE(review): declared returning a reference but the body returns nothing —
// calling it is undefined behaviour; it exists only to suppress assignment.
ParallelDistanceCalculator<TDim>& operator=(ParallelDistanceCalculator<TDim> const& rOther) {};

/// Copy constructor.
ParallelDistanceCalculator(ParallelDistanceCalculator<TDim> const& rOther) {};

///@}

}; // Class ParallelDistanceCalculator

///@}

///@name Type Definitions
///@{

// Static definitions of the local flag (bit 0) and its negation, matching
// KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE) in the class.
template< unsigned int TDim>
const Kratos::Flags ParallelDistanceCalculator<TDim>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(0));
template< unsigned int TDim>
const Kratos::Flags ParallelDistanceCalculator<TDim>::NOT_CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(0, false));

///@}
///@name Input and output
///@{

/// input stream function (no-op: nothing to read)
template<unsigned int TDim>
inline std::istream& operator >> (std::istream& rIStream,
                                  ParallelDistanceCalculator<TDim>& rThis)
{
    return rIStream;
}

/// output stream function
template<unsigned int TDim>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const ParallelDistanceCalculator<TDim>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}

} // namespace Kratos.

#endif // KRATOS_PARALLEL_DISTANCE_CALCULATOR_H_INCLUDED defined
parallel_measurement.c
/* Calculating the value of pi using reduction : Parallel Implementation Author : Omkar Damle. Date : August 2016. */ #include<stdio.h> #include<math.h> #include<omp.h> #include<time.h> #include<string.h> #include<stdlib.h> // Using the MONOTONIC clock #define CLK CLOCK_MONOTONIC /* Function to compute the difference between two points in time */ struct timespec diff(struct timespec start, struct timespec end); /* Function to computes the difference between two time instances Taken from - http://www.guyrutenberg.com/2007/09/22/profiling-code-using-clock_gettime/ Further reading: http://stackoverflow.com/questions/6749621/how-to-create-a-high-resolution-timer-in-linux-to-measure-program-performance http://stackoverflow.com/questions/3523442/difference-between-clock-realtime-and-clock-monotonic */ struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if((end.tv_nsec-start.tv_nsec)<0){ temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else{ temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } int main(int argc, char* argv[]) { struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg; /* Should start before anything else */ clock_gettime(CLK, &start_e2e); /* Check if enough command-line arguments are taken in. 
*/ if(argc < 3){ printf( "Usage: %s n p \n", argv[0] ); return -1; } int n=atoi(argv[1]); /* size of input array */ int p=atoi(argv[2]); /* number of processors*/ char *problem_name = "matrix_multiplication"; char *approach_name = "omp_parallel"; // char buffer[10]; // FILE* inputFile; FILE* outputFile; // inputFile = fopen(argv[3],"r"); char outputFileName[50]; sprintf(outputFileName,"output/%s_%s_%s_%s_output.txt",problem_name,approach_name,argv[1],argv[2]); int *a[n],*b[n],*c[n]; //counters for loops int i,j,k; //putting values in the matrices; for(i = 0;i < n;i++){ a[i] = (int *) malloc(n * sizeof(int)); b[i] = (int *) malloc(n * sizeof(int)); c[i] = (int *) malloc(n * sizeof(int)); for(j = 0; j < n; j++){ a[i][j] = 1; b[i][j] = 1; c[i][j] = 0; } } //Setting parameters for parallelizing the code clock_gettime(CLK, &start_alg); /* Start the algo timer */ /*----------------------Core algorithm starts here----------------------------------------------*/ omp_set_num_threads(p); //Matrix multiplication //#pragma omp parallel private(i,j,k) //{ //int id = omp_get_thread_num(); //int start = id*(n/p); //int end = (id+1)*(n/p); //if(id == p-1) // end = n; //printf("I'm here, %d\n", id); //for(i=start;i<end;i++){ #pragma omp for private(i,j,k) for(i=0;i<n;i++){ for(j=0;j<n;j++){ for(k=0;k<n;k++){ // printf("(%d,%d,%d)\n", i,j,k); c[i][j] += a[i][k]*b[k][j]; } } } //} /*----------------------Core algorithm finished--------------------------------------------------*/ clock_gettime(CLK, &end_alg); /* End the algo timer */ /* Ensure that only the algorithm is present between these two timers. Further, the whole algorithm should be present. */ /* Should end before anything else (printing comes later) */ clock_gettime(CLK, &end_e2e); e2e = diff(start_e2e, end_e2e); alg = diff(start_alg, end_alg); /*-----------REMOVE THIS SEGMENT. 
ONLY FOR DEBUGGING----------------*/ for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%d ", c[i][j]); printf("\n"); } outputFile = fopen(outputFileName,"w"); // fprintf(outputFile,"%.8f\n",pi); /* problem_name,approach_name,n,p,e2e_sec,e2e_nsec,alg_sec,alg_nsec Change problem_name to whatever problem you've been assigned Change approach_name to whatever approach has been assigned p should be 0 for serial codes!! */ printf("%s,%s,%d,%d,%d,%ld,%d,%ld\n", problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); return 0; }
DemBonesExt.h
/////////////////////////////////////////////////////////////////////////////// // Dem Bones - Skinning Decomposition Library // // Copyright (c) 2019, Electronic Arts. All rights reserved. // /////////////////////////////////////////////////////////////////////////////// #ifndef DEM_BONES_EXT #define DEM_BONES_EXT #include "DemBones.h" #include <Eigen/Geometry> #ifndef DEM_BONES_MAT_BLOCKS #include "MatBlocks.h" #define DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED #endif namespace Dem { /** @class DemBonesExt DemBonesExt.h "DemBones/DemBonesExt.h" @brief Extended class to handle hierarchical skeleton with local rotations/translations and bind matrices @details Call computeRTB() to get local rotations/translations and bind matrices after skinning decomposition is done and other data is set. @b _Scalar is the floating-point data type. @b _AniMeshScalar is the floating-point data type of mesh sequence #v. */ template<class _Scalar, class _AniMeshScalar> class DemBonesExt: public DemBones<_Scalar, _AniMeshScalar> { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW using MatrixX=Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Matrix4=Eigen::Matrix<_Scalar, 4, 4>; using Matrix3=Eigen::Matrix<_Scalar, 3, 3>; using VectorX=Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>; using Vector4=Eigen::Matrix<_Scalar, 4, 1>; using Vector3=Eigen::Matrix<_Scalar, 3, 1>; using SparseMatrix=Eigen::SparseMatrix<_Scalar>; using Triplet=Eigen::Triplet<_Scalar>; using DemBones<_Scalar, _AniMeshScalar>::nIters; using DemBones<_Scalar, _AniMeshScalar>::nInitIters; using DemBones<_Scalar, _AniMeshScalar>::nTransIters; using DemBones<_Scalar, _AniMeshScalar>::transAffine; using DemBones<_Scalar, _AniMeshScalar>::transAffineNorm; using DemBones<_Scalar, _AniMeshScalar>::nWeightsIters; using DemBones<_Scalar, _AniMeshScalar>::nnz; using DemBones<_Scalar, _AniMeshScalar>::weightsSmooth; using DemBones<_Scalar, _AniMeshScalar>::weightsSmoothStep; using DemBones<_Scalar, _AniMeshScalar>::weightEps; 
using DemBones<_Scalar, _AniMeshScalar>::nV; using DemBones<_Scalar, _AniMeshScalar>::nB; using DemBones<_Scalar, _AniMeshScalar>::nS; using DemBones<_Scalar, _AniMeshScalar>::nF; using DemBones<_Scalar, _AniMeshScalar>::fStart; using DemBones<_Scalar, _AniMeshScalar>::subjectID; using DemBones<_Scalar, _AniMeshScalar>::u; using DemBones<_Scalar, _AniMeshScalar>::w; using DemBones<_Scalar, _AniMeshScalar>::m; using DemBones<_Scalar, _AniMeshScalar>::v; using DemBones<_Scalar, _AniMeshScalar>::fv; using DemBones<_Scalar, _AniMeshScalar>::iter; using DemBones<_Scalar, _AniMeshScalar>::iterTransformations; using DemBones<_Scalar, _AniMeshScalar>:: iterWeights; //! Timestamps for bone transformations #m, [@c size] = #nS, #fTime(@p k) is the timestamp of frame @p k Eigen::VectorXd fTime; //! Name of bones, [@c size] = #nB, #boneName(@p j) is the name bone of @p j std::vector<std::string> boneName; //! Parent bone index, [@c size] = #nB, #parent(@p j) is the index of parent bone of @p j, #parent(@p j) = -1 if @p j has no parent. Eigen::VectorXi parent; //! Original bind pre-matrix, [@c size] = [4*#nS, 4*#nB], #bind.@a block(4*@p s, 4*@p j, 4, 4) is the global bind matrix of bone @p j on subject @p s at the rest pose MatrixX bind; //! Inverse pre-multiplication matrices, [@c size] = [4*#nS, 4*#nB], #preMulInv.@a block(4*@p s, 4*@p j, 4, 4) is the inverse of pre-local transformation of bone @p j on subject @p s MatrixX preMulInv; //! Rotation order, [@c size] = [3*#nS, #nB], #rotOrder.@a col(@p j).@a segment<3>(3*@p s) is the rotation order of bone @p j on subject @p s, 0=@c X, 1=@c Y, 2=@c Z, e.g. {0, 1, 2} is @c XYZ order Eigen::MatrixXi rotOrder; //! 
Bind transformation update, 0=keep original, 1=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity int bindUpdate; /** @brief Constructor and setting default parameters */ DemBonesExt(): bindUpdate(0) { clear(); } /** @brief Clear all data */ void clear() { fTime.resize(0); boneName.resize(0); parent.resize(0); bind.resize(0, 0); preMulInv.resize(0, 0); rotOrder.resize(0, 0); DemBones::clear(); } /** @brief Local rotations, translations and global bind matrices of a subject @details Required all data in the base class: #u, #fv, #nV, #v, #nF, #fStart, #subjectID, #nS, #m, #w, #nB This function will initialize these default values for missing attributes: - #parent: -1 vector, [@c size] = #nB - #preMulInv: 4*4 identity matrix blocks, [@c size] = [4*#nS, 4*#nB] - #rotOrder: {0, 1, 2} vector blocks, [@c size] = [3*#nS, #nB] @param[in] s is the subject index @param[out] lr is the [3*@p nFr, #nB] by-reference output local rotations, @p lr.@a col(@p j).segment<3>(3*@p k) is the (@c rx, @c ry, @c rz) of bone @p j at frame @p k @param[out] lt is the [3*@p nFr, #nB] by-reference output local translations, @p lt.@a col(@p j).segment<3>(3*@p k) is the (@c tx, @c ty, @c tz) of bone @p j at frame @p k @param[out] gb is the [4, 4*#nB] by-reference output global bind matrices, @p gb.@a block(0, 4*@p j, 4, 4) is the bind matrix of bone j @param[out] lbr is the [3, #nB] by-reference output local rotations at bind pose @p lbr.@a col(@p j).segment<3>(3*@p k) is the (@c rx, @c ry, @c rz) of bone @p j @param[out] lbt is the [3, #nB] by-reference output local translations at bind pose, @p lbt.@a col(@p j).segment<3>(3**3k) is the (@c tx, @c ty, @c tz) of bone @p j @param[in] degreeRot=true will output rotations in degree, otherwise output in radian */ void computeRTB(int s, MatrixX& lr, MatrixX& lt, MatrixX& gb, MatrixX& lbr, MatrixX& lbt, bool degreeRot=true) { computeBind(s, gb); if (parent.size() != nB) { // Preserve any existing values 
Eigen::VectorXi temp = Eigen::VectorXi::Constant(nB, -1); temp.head(parent.size()) = parent; parent = temp; } if (preMulInv.size() != 16 * nS * nB) { // Preserve any existing values MatrixX temp = MatrixX::Identity(4, 4).replicate(nS, nB); temp.block(0, 0, preMulInv.rows(), preMulInv.cols()) = preMulInv; preMulInv = temp; } if (rotOrder.size() != 3 * nS * nB) { // Preserve any existing values Eigen::MatrixXi temp = Eigen::Vector3i(0, 1, 2).replicate(nS, nB); temp.block(0, 0, rotOrder.rows(), rotOrder.cols()) = rotOrder; rotOrder = temp; } int nFs=fStart(s+1)-fStart(s); lr.resize(nFs*3, nB); lt.resize(nFs*3, nB); lbr.resize(3, nB); lbt.resize(3, nB); MatrixX lm(4*nFs, 4*nB); #pragma omp parallel for for (int j=0; j<nB; j++) { Eigen::Vector3i ro=rotOrder.col(j).template segment<3>(s*3); Matrix4 lb; if (parent(j)==-1) lb=preMulInv.blk4(s, j)*gb.blk4(0, j); else lb=preMulInv.blk4(s, j)*gb.blk4(0, parent(j)).inverse()*gb.blk4(0, j); Vector3 curRot=Vector3::Zero(); toRot(lb.template topLeftCorner<3, 3>(), curRot, ro); lbr.col(j)=curRot; lbt.col(j)=lb.template topRightCorner<3, 1>(); Matrix4 lm; for (int k=0; k<nFs; k++) { if (parent(j)==-1) lm=preMulInv.blk4(s, j)*m.blk4(k+fStart(s), j)*gb.blk4(0, j); else lm=preMulInv.blk4(s, j)*(m.blk4(k+fStart(s), parent(j))*gb.blk4(0, parent(j))).inverse()*m.blk4(k+fStart(s), j)*gb.blk4(0, j); toRot(lm.template topLeftCorner<3, 3>(), curRot, ro); lr.vec3(k, j)=curRot; lt.vec3(k, j)=lm.template topRightCorner<3, 1>(); } } if (degreeRot) { lr*=180/EIGEN_PI; lbr*=180/EIGEN_PI; } } private: /** p-norm centroids (using #transAffineNorm) and rotations to identity @param s is the subject index @param b is the [4, 4*#nB] by-reference output global bind matrices, #b.#a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j */ void computeCentroids(int s, MatrixX& b) { MatrixX c=MatrixX::Zero(4, nB); for (int i=0; i<nV; i++) for (typename SparseMatrix::InnerIterator it(w, i); it; ++it) c.col(it.row())+=pow(it.value(), 
transAffineNorm)*u.vec3(s, i).homogeneous(); b=MatrixX::Identity(4, 4).replicate(1, nB); for (int j=0; j<nB; j++) if (c(3, j)!=0) b.transVec(0, j)=c.col(j).template head<3>()/c(3, j); } /** Global bind pose @param s is the subject index @param bindUpdate is the type of bind pose update, 0=keep original, 1=set translations to p-norm centroids (using #transAffineNorm) and rotations to identity @param b is the the [4, 4*#nB] by-reference output global bind matrices, #b.#a block(0, 4*@p j, 4, 4) is the bind matrix of bone @p j */ void computeBind(int s, MatrixX& b) { if (bind.size() != nS * 4, nB * 4) { MatrixX bindOrig = bind; bind.resize(nS*4, nB*4); MatrixX b; for (int k=0; k<nS; k++) computeCentroids(k, b); bind.block(4*s, 0, 4, 4*nB)=b; // Override bind pose with existing bind pose if preserving existing bones bind.block(0, 0, bindOrig.rows(), bindOrig.cols()) = bindOrig; } switch (bindUpdate) { case 0: b=bind.block(4*s, 0, 4, 4*nB); break; case 1: computeCentroids(s, b); break; } } /** Euler angles from rotation matrix @param rMat is the 3*3 rotation matrix @param curRot is the input current Euler angles, it is also the by-reference output closet Euler angles correspond to @p rMat @param ro is the rotation order, 0=@c X, 1=@c Y, 2=@c Z, e.g. 
{0, 1, 2} is @c XYZ order @param eps is the epsilon */ void toRot(const Matrix3& rMat, Vector3& curRot, const Eigen::Vector3i& ro, _Scalar eps=_Scalar(1e-10)) { Vector3 r0=rMat.eulerAngles(ro(2), ro(1), ro(0)).reverse(); _Scalar gMin=(r0-curRot).squaredNorm(); Vector3 rMin=r0; Vector3 r; Matrix3 tmpMat; for (int fx=-1; fx<=1; fx+=2) for (_Scalar sx=-2*EIGEN_PI; sx<2.1*EIGEN_PI; sx+=EIGEN_PI) { r(0)=fx*r0(0)+sx; for (int fy=-1; fy<=1; fy+=2) for (_Scalar sy=-2*EIGEN_PI; sy<2.1*EIGEN_PI; sy+=EIGEN_PI) { r(1)=fy*r0(1)+sy; for (int fz=-1; fz<=1; fz+=2) for (_Scalar sz=-2*EIGEN_PI; sz<2.1*EIGEN_PI; sz+=EIGEN_PI) { r(2)=fz*r0(2)+sz; tmpMat=Matrix3(Eigen::AngleAxis<_Scalar>(r(ro(2)), Vector3::Unit(ro(2))))* Eigen::AngleAxis<_Scalar>(r(ro(1)), Vector3::Unit(ro(1)))* Eigen::AngleAxis<_Scalar>(r(ro(0)), Vector3::Unit(ro(0))); if ((tmpMat-rMat).squaredNorm()<eps) { _Scalar tmp=(r-curRot).squaredNorm(); if (tmp<gMin) { gMin=tmp; rMin=r; } } } } } curRot=rMin; } }; } #ifdef DEM_BONES_DEM_BONES_EXT_MAT_BLOCKS_UNDEFINED #undef blk4 #undef rotMat #undef transVec #undef vec3 #undef DEM_BONES_MAT_BLOCKS #endif #undef rotMatFromEuler #endif
clip.c
#include "msghandling.h"
#include "zgl.h"

/* fill triangle profile */
/* #define PROFILE */

#define CLIP_XMIN (1 << 0)
#define CLIP_XMAX (1 << 1)
#define CLIP_YMIN (1 << 2)
#define CLIP_YMAX (1 << 3)
#define CLIP_ZMIN (1 << 4)
#define CLIP_ZMAX (1 << 5)

/* Project a clip-space vertex to window coordinates and quantize its color
   (and, when texturing is on, its texture coordinates) into the fixed-point
   fields of zp. */
static void gl_transform_to_viewport_clip_c(GLVertex* v) { /* MARK: NOT_INLINED_IN_OG*/
	GLContext* c = gl_get_context();
	/* coordinates */
	{
		/* Single-precision reciprocal: the original 1.0 literal forced a
		   double divide followed by a narrowing conversion. */
		GLfloat winv = 1.0f / v->pc.W;
		v->zp.x = (GLint)(v->pc.X * winv * c->viewport.scale.X + c->viewport.trans.X);
		v->zp.y = (GLint)(v->pc.Y * winv * c->viewport.scale.Y + c->viewport.trans.Y);
		v->zp.z = (GLint)(v->pc.Z * winv * c->viewport.scale.Z + c->viewport.trans.Z);
	}
	/* color: scale [0,1] floats into the masked fixed-point color range */
	v->zp.r = (GLint)(v->color.v[0] * COLOR_CORRECTED_MULT_MASK + COLOR_MIN_MULT) & COLOR_MASK;
	v->zp.g = (GLint)(v->color.v[1] * COLOR_CORRECTED_MULT_MASK + COLOR_MIN_MULT) & COLOR_MASK;
	v->zp.b = (GLint)(v->color.v[2] * COLOR_CORRECTED_MULT_MASK + COLOR_MIN_MULT) & COLOR_MASK;
	/* texture */
	if (c->texture_2d_enabled) {
		v->zp.s = (GLint)(v->tex_coord.X * (ZB_POINT_S_MAX - ZB_POINT_S_MIN) + ZB_POINT_S_MIN);
		v->zp.t = (GLint)(v->tex_coord.Y * (ZB_POINT_T_MAX - ZB_POINT_T_MIN) + ZB_POINT_T_MIN);
	}
}

/* Generates one clip routine per frustum plane: intersects segment a-b with
   the plane (sign)dir = W, writes the intersection into c, returns the
   parameter t of the intersection along a-b. */
#define clip_funcdef(name, sign, dir, dir1, dir2)                  \
	static GLfloat name(V4* c, V4* a, V4* b) {                     \
		GLfloat t, dX, dY, dZ, dW, den;                            \
		dX = (b->X - a->X);                                        \
		dY = (b->Y - a->Y);                                        \
		dZ = (b->Z - a->Z);                                        \
		dW = (b->W - a->W);                                        \
		den = -(sign d##dir) + dW;                                 \
		if (den == 0)                                              \
			t = 0;                                                 \
		else                                                       \
			t = (sign a->dir - a->W) / den;                        \
		c->dir1 = a->dir1 + t * d##dir1;                           \
		c->dir2 = a->dir2 + t * d##dir2;                           \
		c->W = a->W + t * dW;                                      \
		c->dir = sign c->W;                                        \
		return t;                                                  \
	}

clip_funcdef(clip_xmin, -, X, Y, Z)
clip_funcdef(clip_xmax, +, X, Y, Z)
clip_funcdef(clip_ymin, -, Y, X, Z)
clip_funcdef(clip_ymax, +, Y, X, Z)
clip_funcdef(clip_zmin, -, Z, X, Y)
clip_funcdef(clip_zmax, +, Z, X, Y)

/* Indexed by clip_bit; order matches the CLIP_* flag bits above. */
static GLfloat (*clip_proc[6])(V4*, V4*, V4*) = {clip_xmin, clip_xmax, clip_ymin, clip_ymax, clip_zmin, clip_zmax};

/* point */

#if TGL_FEATURE_ALT_RENDERMODES == 1
/* Record a select-buffer hit covering the min/max depth of three z values. */
static void gl_add_select1(GLint z1, GLint z2, GLint z3) {
	GLint min, max;
	min = max = z1;
	if (z2 < min)
		min = z2;
	if (z3 < min)
		min = z3;
	if (z2 > max)
		max = z2;
	if (z3 > max)
		max = z3;
	gl_add_select(0xffffffff - min, 0xffffffff - max);
}
#else
#define gl_add_select1(a, b, c) /*a comment*/
#endif

void gl_draw_point(GLVertex* p0) {
	GLContext* c = gl_get_context();
	/* A point is either fully visible or fully clipped. */
	if (p0->clip_code == 0) {
#if TGL_FEATURE_ALT_RENDERMODES == 1
		if (c->render_mode == GL_SELECT) {
			gl_add_select(p0->zp.z, p0->zp.z);
		} else if (c->render_mode == GL_FEEDBACK) {
			gl_add_feedback(GL_POINT_TOKEN, p0, NULL, NULL, 0);
		} else
#endif
		{
			ZB_plot(c->zb, &p0->zp);
		}
	}
}

/* line */

/*
 * Line Clipping
 */

/* Linear interpolation of clip coordinates and color between p0 and p1
   (texture coordinates are not interpolated for lines). */
static void GLinterpolate(GLVertex* q, GLVertex* p0, GLVertex* p1, GLfloat t) {
	GLint i;
	q->pc.X = p0->pc.X + (p1->pc.X - p0->pc.X) * t;
	q->pc.Y = p0->pc.Y + (p1->pc.Y - p0->pc.Y) * t;
	q->pc.Z = p0->pc.Z + (p1->pc.Z - p0->pc.Z) * t;
	q->pc.W = p0->pc.W + (p1->pc.W - p0->pc.W) * t;
#pragma omp simd
	for (i = 0; i < 3; i++)
		q->color.v[i] = p0->color.v[i] + (p1->color.v[i] - p0->color.v[i]) * t;
}

/* Line Clipping algorithm from 'Computer Graphics', Principles and Practice */
/* Clip the parametric range [tmin,tmax] against one half-space; returns 0 if
   the whole segment is rejected. */
static GLint ClipLine1(GLfloat denom, GLfloat num, GLfloat* tmin, GLfloat* tmax) {
	GLfloat t;
	if (denom > 0) {
		t = num / denom;
		if (t > *tmax)
			return 0;
		if (t > *tmin)
			*tmin = t;
	} else if (denom < 0) {
		t = num / denom;
		if (t < *tmin)
			return 0;
		if (t < *tmax)
			*tmax = t;
	} else if (num > 0)
		return 0;
	return 1;
}

void gl_draw_line(GLVertex* p1, GLVertex* p2) {
	GLContext* c = gl_get_context();
	GLfloat dx, dy, dz, dw, x1, y1, z1, w1;
	GLVertex q1, q2;
	GLint cc1, cc2;

	cc1 = p1->clip_code;
	cc2 = p2->clip_code;

	if ((cc1 | cc2) == 0) {
		/* fully inside the frustum: draw directly */
#if TGL_FEATURE_ALT_RENDERMODES == 1
		if (c->render_mode == GL_SELECT) {
			gl_add_select1(p1->zp.z, p2->zp.z, p2->zp.z);
		} else if (c->render_mode == GL_FEEDBACK) {
			gl_add_feedback(GL_LINE_TOKEN, p1, p2, NULL, 0);
		} else
#endif
		{
			if (c->zb->depth_test)
				ZB_line_z(c->zb, &p1->zp, &p2->zp);
			else
				ZB_line(c->zb, &p1->zp, &p2->zp);
		}
	} else if ((cc1 & cc2) != 0) {
		/* both endpoints outside the same plane: trivially rejected */
		return;
	} else {
		dx = p2->pc.X - p1->pc.X;
		dy = p2->pc.Y - p1->pc.Y;
		dz = p2->pc.Z - p1->pc.Z;
		dw = p2->pc.W - p1->pc.W;
		x1 = p1->pc.X;
		y1 = p1->pc.Y;
		z1 = p1->pc.Z;
		w1 = p1->pc.W;

		GLfloat tmin = 0;
		GLfloat tmax = 1;
		if (ClipLine1(dx + dw, -x1 - w1, &tmin, &tmax) && ClipLine1(-dx + dw, x1 - w1, &tmin, &tmax) &&
			ClipLine1(dy + dw, -y1 - w1, &tmin, &tmax) && ClipLine1(-dy + dw, y1 - w1, &tmin, &tmax) &&
			ClipLine1(dz + dw, -z1 - w1, &tmin, &tmax) && ClipLine1(-dz + dw, z1 - w1, &tmin, &tmax)) {
			GLinterpolate(&q1, p1, p2, tmin);
			GLinterpolate(&q2, p1, p2, tmax);
			gl_transform_to_viewport_clip_c(&q1);
			gl_transform_to_viewport_clip_c(&q2);
#if TGL_FEATURE_ALT_RENDERMODES == 1
			if (c->render_mode == GL_SELECT) {
				gl_add_select1(q1.zp.z, q2.zp.z, q2.zp.z);
			} else if (c->render_mode == GL_FEEDBACK) {
				gl_add_feedback(GL_LINE_TOKEN, &q1, &q2, NULL, 0);
			} else
#endif
			{
				if (c->zb->depth_test)
					ZB_line_z(c->zb, &q1.zp, &q2.zp);
				else
					ZB_line(c->zb, &q1.zp, &q2.zp);
			}
		}
	}
}

/*Triangles*/

/* Finish a vertex produced by a clip routine: interpolate its attributes,
   recompute its clip code and, if visible, its window coordinates. */
static void updateTmp(GLVertex* q, GLVertex* p0, GLVertex* p1, GLfloat t) {
	GLContext* c = gl_get_context();
	(void)c; /* unused when the texture branch below is compiled out */
	{
		q->color.v[0] = p0->color.v[0] + (p1->color.v[0] - p0->color.v[0]) * t;
		q->color.v[1] = p0->color.v[1] + (p1->color.v[1] - p0->color.v[1]) * t;
		q->color.v[2] = p0->color.v[2] + (p1->color.v[2] - p0->color.v[2]) * t;
	}

#if TGL_OPTIMIZATION_HINT_BRANCH_COST < 1
	if (c->texture_2d_enabled)
#endif
	{
		q->tex_coord.X = p0->tex_coord.X + (p1->tex_coord.X - p0->tex_coord.X) * t;
		q->tex_coord.Y = p0->tex_coord.Y + (p1->tex_coord.Y - p0->tex_coord.Y) * t;
	}
	q->clip_code = gl_clipcode(q->pc.X, q->pc.Y, q->pc.Z, q->pc.W);
	if (q->clip_code == 0)
		gl_transform_to_viewport_clip_c(q);
}

static void gl_draw_triangle_clip(GLVertex* p0, GLVertex* p1, GLVertex* p2, GLint clip_bit);

void gl_draw_triangle(GLVertex* p0, GLVertex* p1, GLVertex* p2) {
	GLContext* c = gl_get_context();
	GLint co, cc[3], front;
	cc[0] = p0->clip_code;
	cc[1] = p1->clip_code;
	cc[2] = p2->clip_code;

	co = cc[0] | cc[1] | cc[2];

	/* we handle the non clipped case here to go faster */
	if (co == 0) {
		/* signed area in window space decides facing */
		GLfloat norm;
		norm = (GLfloat)(p1->zp.x - p0->zp.x) * (GLfloat)(p2->zp.y - p0->zp.y) -
			   (GLfloat)(p2->zp.x - p0->zp.x) * (GLfloat)(p1->zp.y - p0->zp.y);
		if (norm == 0)
			return;
		front = norm < 0.0;
		front = front ^ c->current_front_face;

		/* back face culling */
		if (c->cull_face_enabled) {
			/* most used case first */
			if (c->current_cull_face == GL_BACK) {
				if (front == 0)
					return;
				c->draw_triangle_front(p0, p1, p2);
			} else if (c->current_cull_face == GL_FRONT) {
				if (front != 0)
					return;
				c->draw_triangle_back(p0, p1, p2);
			} else {
				/* GL_FRONT_AND_BACK: everything culled */
				return;
			}
		} else {
			/* no culling */
			if (front) {
				c->draw_triangle_front(p0, p1, p2);
			} else {
				c->draw_triangle_back(p0, p1, p2);
			}
		}
	} else {
		/* GLint c_and = cc[0] & cc[1] & cc[2];*/
		if ((cc[0] & cc[1] & cc[2]) == 0) { /* Don't draw a triangle with no points*/
			gl_draw_triangle_clip(p0, p1, p2, 0);
		}
	}
}

/* Recursively clip a triangle against frustum plane clip_bit..5, drawing the
   resulting pieces via gl_draw_triangle(). */
static void gl_draw_triangle_clip(GLVertex* p0, GLVertex* p1, GLVertex* p2, GLint clip_bit) {
	GLint co, c_and, co1, cc[3], edge_flag_tmp, clip_mask;
	GLVertex* q[3];

	cc[0] = p0->clip_code;
	cc[1] = p1->clip_code;
	cc[2] = p2->clip_code;

	co = cc[0] | cc[1] | cc[2];
	if (co == 0) {
		gl_draw_triangle(p0, p1, p2);
	} else {
		c_and = cc[0] & cc[1] & cc[2];

		/* the triangle is completely outside */
		if (c_and != 0)
			return;

		/* find the next direction to clip */
		while (clip_bit < 6 && (co & (1 << clip_bit)) == 0) {
			clip_bit++;
		}

		/* this test can be true only in case of rounding errors */
		if (clip_bit == 6) {
			/* The 2 bit and the 4 bit.*/
#if 0
			tgl_warning("Error:\n");tgl_warning("%f %f %f %f\n",p0->pc.X,p0->pc.Y,p0->pc.Z,p0->pc.W);tgl_warning("%f %f %f %f\n",p1->pc.X,p1->pc.Y,p1->pc.Z,p1->pc.W);tgl_warning("%f %f %f %f\n",p2->pc.X,p2->pc.Y,p2->pc.Z,p2->pc.W);
#endif
			return;
		}

		clip_mask = 1 << clip_bit;
		co1 = (cc[0] ^ cc[1] ^ cc[2]) & clip_mask;

		if (co1) {
			/* one point outside: rotate so q[0] is the outside vertex */
			if (cc[0] & clip_mask) {
				q[0] = p0;
				q[1] = p1;
				q[2] = p2;
			} else if (cc[1] & clip_mask) {
				q[0] = p1;
				q[1] = p2;
				q[2] = p0;
			} else {
				q[0] = p2;
				q[1] = p0;
				q[2] = p1;
			}
			{
				GLVertex tmp1, tmp2;
				GLfloat tt;
				tt = clip_proc[clip_bit](&tmp1.pc, &q[0]->pc, &q[1]->pc);
				updateTmp(&tmp1, q[0], q[1], tt);

				tt = clip_proc[clip_bit](&tmp2.pc, &q[0]->pc, &q[2]->pc);
				updateTmp(&tmp2, q[0], q[2], tt);

				/* clipping one corner yields a quad: emit two triangles,
				   preserving edge flags of the original edges only */
				tmp1.edge_flag = q[0]->edge_flag;
				edge_flag_tmp = q[2]->edge_flag;
				q[2]->edge_flag = 0;
				gl_draw_triangle_clip(&tmp1, q[1], q[2], clip_bit + 1);

				tmp2.edge_flag = 1;
				tmp1.edge_flag = 0;
				q[2]->edge_flag = edge_flag_tmp;
				gl_draw_triangle_clip(&tmp2, &tmp1, q[2], clip_bit + 1);
			}
		} else {
			/* two points outside: rotate so q[0] is the inside vertex */
			if ((cc[0] & clip_mask) == 0) {
				q[0] = p0;
				q[1] = p1;
				q[2] = p2;
			} else if ((cc[1] & clip_mask) == 0) {
				q[0] = p1;
				q[1] = p2;
				q[2] = p0;
			} else {
				q[0] = p2;
				q[1] = p0;
				q[2] = p1;
			}
			{
				GLVertex tmp1, tmp2;
				GLfloat tt;
				tt = clip_proc[clip_bit](&tmp1.pc, &q[0]->pc, &q[1]->pc);
				updateTmp(&tmp1, q[0], q[1], tt);

				tt = clip_proc[clip_bit](&tmp2.pc, &q[0]->pc, &q[2]->pc);
				updateTmp(&tmp2, q[0], q[2], tt);

				tmp1.edge_flag = 1;
				tmp2.edge_flag = q[2]->edge_flag;
				gl_draw_triangle_clip(q[0], &tmp1, &tmp2, clip_bit + 1);
			}
		}
	}
}

/* see vertex.c to see how the draw functions are assigned.*/
void gl_draw_triangle_select(GLVertex* p0, GLVertex* p1, GLVertex* p2) { gl_add_select1(p0->zp.z, p1->zp.z, p2->zp.z); }
void gl_draw_triangle_feedback(GLVertex* p0, GLVertex* p1, GLVertex* p2) { gl_add_feedback(GL_POLYGON_TOKEN, p0, p1, p2, 0); }

#ifdef PROFILE
int count_triangles, count_triangles_textured, count_pixels;
#warning "Compile with PROFILE slows down everything"
#endif

/* see vertex.c to see how the draw functions are assigned.*/
void gl_draw_triangle_fill(GLVertex* p0, GLVertex* p1, GLVertex* p2) {
	GLContext* c = gl_get_context();
	if (c->texture_2d_enabled) {
		/*	if(c->current_texture)*/
#if TGL_FEATURE_LIT_TEXTURES == 1
		/* flat shading: propagate the provoking vertex's color */
		if (c->current_shade_model != GL_SMOOTH) {
			p1->zp.r = p2->zp.r;
			p1->zp.g = p2->zp.g;
			p1->zp.b = p2->zp.b;
			p0->zp.r = p2->zp.r;
			p0->zp.g = p2->zp.g;
			p0->zp.b = p2->zp.b;
		}
#endif
		ZB_setTexture(c->zb, c->current_texture->images[0].pixmap);
#if TGL_FEATURE_BLEND == 1
		if (c->zb->enable_blend)
			ZB_fillTriangleMappingPerspective(c->zb, &p0->zp, &p1->zp, &p2->zp);
		else
			ZB_fillTriangleMappingPerspectiveNOBLEND(c->zb, &p0->zp, &p1->zp, &p2->zp);
#else
		ZB_fillTriangleMappingPerspectiveNOBLEND(c->zb, &p0->zp, &p1->zp, &p2->zp);
#endif
	} else if (c->current_shade_model == GL_SMOOTH) {
#if TGL_FEATURE_BLEND == 1
		if (c->zb->enable_blend)
			ZB_fillTriangleSmooth(c->zb, &p0->zp, &p1->zp, &p2->zp);
		else
			ZB_fillTriangleSmoothNOBLEND(c->zb, &p0->zp, &p1->zp, &p2->zp);
#else
		ZB_fillTriangleSmoothNOBLEND(c->zb, &p0->zp, &p1->zp, &p2->zp);
#endif
	} else {
#if TGL_FEATURE_BLEND == 1
		if (c->zb->enable_blend)
			ZB_fillTriangleFlat(c->zb, &p0->zp, &p1->zp, &p2->zp);
		else
			ZB_fillTriangleFlatNOBLEND(c->zb, &p0->zp, &p1->zp, &p2->zp);
#else
		ZB_fillTriangleFlatNOBLEND(c->zb, &p0->zp, &p1->zp, &p2->zp);
#endif
	}
}

/* Render a clipped triangle in line mode */
void gl_draw_triangle_line(GLVertex* p0, GLVertex* p1, GLVertex* p2) {
	GLContext* c = gl_get_context();
	if (c->zb->depth_test) {
		if (p0->edge_flag)
			ZB_line_z(c->zb, &p0->zp, &p1->zp);
		if (p1->edge_flag)
			ZB_line_z(c->zb, &p1->zp, &p2->zp);
		if (p2->edge_flag)
			ZB_line_z(c->zb, &p2->zp, &p0->zp);
	} else {
		if (p0->edge_flag)
			ZB_line(c->zb, &p0->zp, &p1->zp);
		if (p1->edge_flag)
			ZB_line(c->zb, &p1->zp, &p2->zp);
		if (p2->edge_flag)
			ZB_line(c->zb, &p2->zp, &p0->zp);
	}
}

/* Render a clipped triangle in point mode */
void gl_draw_triangle_point(GLVertex* p0, GLVertex* p1, GLVertex* p2) {
	GLContext* c = gl_get_context();
	if (p0->edge_flag)
		ZB_plot(c->zb, &p0->zp);
	if (p1->edge_flag)
		ZB_plot(c->zb, &p1->zp);
	if (p2->edge_flag)
		ZB_plot(c->zb, &p2->zp);
}
region_localization_objective.h
#pragma once #ifndef OPTIMIZATION_LIB_REGION_LOCALIZATION_OBJECTIVE_H #define OPTIMIZATION_LIB_REGION_LOCALIZATION_OBJECTIVE_H // STL includes #include <vector> // Optimization lib includes #include "../data_providers/plain_data_provider.h" #include "./dense_objective_function.h" // Spectra #include <Spectra/SymEigsShiftSolver.h> #include <Spectra/MatOp/SparseSymShiftSolve.h> #include <Spectra/MatOp/DenseSymMatProd.h> #include <Spectra/MatOp/SparseSymMatProd.h> #include <Spectra/SymEigsSolver.h> #include <Spectra/SymGEigsSolver.h> #include <Spectra/MatOp/SparseCholesky.h> template<Eigen::StorageOptions StorageOrder_> class RegionLocalizationObjective : public DenseObjectiveFunction<StorageOrder_> { public: /** * Public type definitions */ enum class Properties : int32_t { Delta = DenseObjectiveFunction<StorageOrder_>::Properties::Count_, ValuePerEdge }; /** * Constructors and destructor */ RegionLocalizationObjective(const std::shared_ptr<MeshDataProvider>& mesh_data_provider, const Eigen::VectorXd& mu, const std::shared_ptr<EmptyDataProvider>& empty_data_provider) : DenseObjectiveFunction(mesh_data_provider, empty_data_provider, "Region Localization", 0, false), mu_(mu), tau_(10 * mu.coeff(0)) { half_tau_ = tau_ / 2; this->Initialize(); } virtual ~RegionLocalizationObjective() { } /** * Setters */ /** * Getters */ double GetTau() const { return tau_; } const Eigen::VectorXd& GetSigma() const { return sigma_; } const Eigen::VectorXd& GetMu() const { return mu_; } const Eigen::VectorXd& GetLambda() const { return lambda_; } private: /** * Overrides */ void CalculateValue(double& f) override { f = 0; for(int64_t i = 0; i < lambda_.rows(); i++) { double diff = lambda_.coeff(i) - mu_.coeff(i); double factor = 1 / (mu_.coeff(i) * mu_.coeff(i)); f += factor * (diff * diff); } } void CalculateValuePerVertex(Eigen::VectorXd& f_per_vertex) override { } void CalculateValuePerEdge(Eigen::VectorXd& domain_value_per_edge, Eigen::VectorXd& image_value_per_edge) override { } void 
CalculateGradient(Eigen::VectorXd& g) override { Eigen::MatrixXd phi_squared = Eigen::MatrixXd::Zero(phi_.rows(), phi_.cols()); //#pragma omp parallel for //for(int64_t row = 0; row < phi_.rows(); row++) //{ // for (int64_t col = 0; col < phi_.cols(); col++) // { // phi_squared.coeffRef(row, col) = phi_.coeffRef(row, col) * phi_.coeffRef(row, col); // } //} phi_squared = phi_.cwiseProduct(phi_); //Eigen::VectorXd v_tanh_squared = Eigen::VectorXd::Zero(v_.rows()); // //#pragma omp parallel for //for (int64_t row = 0; row < v_.rows(); row++) //{ // v_tanh_squared.coeffRef(row) = v_tanh_.coeffRef(row) * v_tanh_.coeffRef(row); //} Eigen::VectorXd mu_squared = mu_.cwiseProduct(mu_); Eigen::VectorXd diff = (lambda_ - mu_).cwiseQuotient(mu_squared); g = 2 * phi_squared * diff; //#pragma omp parallel for //for (int64_t row = 0; row < v_.rows(); row++) //{ // g.coeffRef(row) = half_tau_ * g.coeffRef(row) * (1 - v_tanh_squared.coeffRef(row)); //} } void PreUpdate(const Eigen::VectorXd& v) override { v_ = v; v_tanh_ = v_.array().tanh(); sigma_ = half_tau_ * (v_tanh_ + Eigen::VectorXd::Ones(v_.rows())); Eigen::SparseMatrix<double> diag_v = v_.asDiagonal().toDenseMatrix().sparseView(); Eigen::SparseMatrix<double> W = this->GetMeshDataProvider()->GetLaplacian(); Eigen::SparseMatrix<double> A = this->GetMeshDataProvider()->GetMassMatrix(); Eigen::SparseMatrix<double> lhs = W + A * diag_v; Eigen::SparseMatrix<double> rhs = A; Spectra::SparseSymMatProd<double> lhs_op(lhs); Spectra::SparseCholesky<double> rhs_op(rhs); Spectra::SymGEigsSolver<double, Spectra::SMALLEST_MAGN, Spectra::SparseSymMatProd<double>, Spectra::SparseCholesky<double>, Spectra::GEIGS_CHOLESKY> geigs(&lhs_op, &rhs_op, RDS_NEV, RDS_NCV); geigs.init(); int nconv = geigs.compute(); if (geigs.info() == Spectra::SUCCESSFUL) { lambda_ = geigs.eigenvalues(); lambda_.conservativeResize(lambda_.rows() - 1); phi_ = geigs.eigenvectors(); phi_.conservativeResize(phi_.rows(), phi_.cols() - 1); } else { bla = geigs.info(); } 
//lambda_ = geigs.eigenvalues(); //lambda_.conservativeResize(lambda_.rows() - 1); //phi_ = geigs.eigenvectors(); //phi_.conservativeResize(phi_.rows(), phi_.cols() - 1); } void PreInitialize() override { } void InitializeTriplets(std::vector<Eigen::Triplet<double>>& triplets) override { } void CalculateRawTriplets(std::vector<Eigen::Triplet<double>>& triplets) override { } /** * Fields */ Eigen::VectorXd v_tanh_; Eigen::VectorXd v_; Eigen::VectorXd mu_; Eigen::VectorXd lambda_; Eigen::MatrixXd phi_; Eigen::VectorXd sigma_; double tau_; double half_tau_; int bla; }; #endif
libmsr_write_test.c
/** * @author Asim YarKhan (updated) * @author Vince Weaver (original version) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "papi.h" #include "msr_core.h" #include "msr_rapl.h" #define MAX_EVENTS 128 char events[MAX_EVENTS][BUFSIZ]; char filenames[MAX_EVENTS][BUFSIZ]; int ompcpuloadprimes( int limit ) { int num, primes=0; #pragma omp parallel for schedule(dynamic) reduction(+ : primes) for (num = 1; num <= limit; num++) { int i = 2; while(i <= num) { if(num % i == 0) break; i++; } if(i == num) primes++; } return primes; } int main (int argc, char **argv) { int retval,cid,rapl_cid=-1,numcmp; int EventSet = PAPI_NULL; long long values[MAX_EVENTS]; int i,code,enum_retval; const PAPI_component_info_t *cmpinfo = NULL; long long start_time,write_start_time,write_end_time,read_start_time,read_end_time; char event_name[BUFSIZ]; union { long long ll; double dbl; } event_value_union; static int num_events=0; FILE *fileout; /* PAPI Initialization */ retval = PAPI_library_init( PAPI_VER_CURRENT ); if ( retval != PAPI_VER_CURRENT ) { fprintf(stderr,"PAPI_library_init failed\n"); exit(1); } /* Find the libmsr component */ numcmp = PAPI_num_components(); for(cid=0; cid<numcmp; cid++) { if ( (cmpinfo = PAPI_get_component_info(cid)) == NULL) { fprintf(stderr,"PAPI_get_component_info failed\n"); exit(1); } if (strstr(cmpinfo->name,"libmsr")) { rapl_cid=cid; printf("Found libmsr component at cid %d\n", rapl_cid); if (cmpinfo->disabled) { fprintf(stderr,"No libmsr events found: %s\n", cmpinfo->disabled_reason); exit(1); } break; } } /* Component not found */ if (cid==numcmp) { fprintf(stderr,"No libmsr component found\n"); exit(1); } /* Find events in the component */ code = PAPI_NATIVE_MASK; enum_retval = PAPI_enum_cmp_event( &code, PAPI_ENUM_FIRST, cid ); while ( enum_retval == PAPI_OK ) { retval = PAPI_event_code_to_name( code, event_name ); if ( retval != PAPI_OK ) { printf("Error translating %#x\n",code); exit(1); } printf("Found: 
%s\n",event_name); strncpy(events[num_events],event_name,BUFSIZ); sprintf(filenames[num_events],"results.%s",event_name); num_events++; if (num_events==MAX_EVENTS) { printf("Too many events! %d\n",num_events); exit(1); } enum_retval = PAPI_enum_cmp_event( &code, PAPI_ENUM_EVENTS, cid ); } if (num_events==0) { printf("Error! No libmsr events found!\n"); exit(1); } /* Open output file */ char fileoutname[]="libmsr_write_test_output.txt"; fileout=fopen( fileoutname ,"w" ); if ( fileout==NULL) { fprintf( stderr,"Could not open %s\n",fileoutname ); exit(1); } /* Create EventSet */ retval = PAPI_create_eventset( &EventSet ); if (retval != PAPI_OK) { fprintf(stderr,"Error creating eventset!\n"); } for(i=0;i<num_events;i++) { retval = PAPI_add_named_event( EventSet, events[i] ); if (retval != PAPI_OK) fprintf(stderr,"Error adding event %s\n",events[i]); } start_time=PAPI_get_real_nsec(); /* Grab the initial values for the events */ retval = PAPI_start( EventSet); if (retval != PAPI_OK) { fprintf(stderr,"PAPI_start() failed\n"); exit(1); } /* Initial checking read */ retval = PAPI_read( EventSet, values); if (retval != PAPI_OK) { fprintf(stderr,"PAPI_read() failed\n"); exit(1); } /* Write a header line */ fprintf( fileout, "ACTION TIME-STAMP TIME-FOR-UNIT-WORK TIME-OVERHEAD-RW\t" ); for(i=0; i<num_events; i++) fprintf( fileout, "%s\t", events[i]+9 ); fprintf( fileout, "\n" ); /* Read the initial values */ retval = PAPI_read( EventSet, values); if (retval != PAPI_OK) { fprintf(stderr,"PAPI_read() failed\n"); exit(1); } fprintf( fileout, "INITIAL %.3f 0\t", ((double)(PAPI_get_real_nsec()-start_time))/1.0e9 ); for(i=0; i<num_events; i++) { event_value_union.ll = values[i]; fprintf( fileout, "%.3f\t", event_value_union.dbl ); } fprintf( fileout, "\n" ); int rpt=0; int limit1base=10; int limit2base=10; while(rpt++<200) { //printf("rpt %d\n", rpt); if ( rpt % 10 == 0 ) { for (i=0; i<num_events; i++) { event_value_union.ll = values[i]; if ( !strcmp( events[i], 
"libmsr:::PKG_POWER_LIMIT_1:PACKAGE0" )) event_value_union.dbl=limit1base+(rpt/2); else if ( !strcmp( events[i], "libmsr:::PKG_TIME_WINDOW_POWER_LIMIT_1:PACKAGE0" )) event_value_union.dbl=1.0; else if ( !strcmp( events[i], "libmsr:::PKG_POWER_LIMIT_2:PACKAGE0" )) event_value_union.dbl=limit2base+(rpt/2); else if ( !strcmp( events[i], "libmsr:::PKG_TIME_WINDOW_POWER_LIMIT_2:PACKAGE0" )) event_value_union.dbl=1.0; else if ( !strcmp( events[i], "libmsr:::PKG_POWER_LIMIT_1:PACKAGE1" )) event_value_union.dbl=limit1base+(rpt/2); else if ( !strcmp( events[i], "libmsr:::PKG_TIME_WINDOW_POWER_LIMIT_1:PACKAGE1" )) event_value_union.dbl=1.0; else if ( !strcmp( events[i], "libmsr:::PKG_POWER_LIMIT_2:PACKAGE1" )) event_value_union.dbl=limit2base+(rpt/2); else if ( !strcmp( events[i], "libmsr:::PKG_TIME_WINDOW_POWER_LIMIT_2:PACKAGE1" )) event_value_union.dbl=1.0; else event_value_union.dbl=PAPI_NULL; values[i]=event_value_union.ll; } write_start_time=PAPI_get_real_nsec(); retval = PAPI_write( EventSet, values ); write_end_time=PAPI_get_real_nsec(); if (retval != PAPI_OK) { fprintf(stderr,"PAPI_write() failed\n"); exit(1); } fprintf( fileout, "SET %.3f \t0 \t", ((double)(PAPI_get_real_nsec()-start_time))/1.0e9 ); fprintf( fileout, "%.5e\t", ((double)(write_end_time-write_start_time))/1.0e9 ); for(i=0; i<num_events; i++) { event_value_union.ll = values[i]; fprintf( fileout, "%.3f\t", event_value_union.dbl ); } fprintf( fileout, "\n" ); } /* DO SOME WORK TO USE ENERGY */ //usleep(100000); double work_start_time=PAPI_get_real_nsec(); ompcpuloadprimes( 100000 ); double work_time=PAPI_get_real_nsec()-work_start_time; //printf("primescount %d\n", primescount); /* Read and output the values */ read_start_time=PAPI_get_real_nsec(); retval = PAPI_read( EventSet, values ); read_end_time=PAPI_get_real_nsec(); if (retval != PAPI_OK) { fprintf(stderr,"PAPI_read() failed\n"); exit(1); } fprintf( fileout, "READ %.3f\t %.3f\t", ((double)(PAPI_get_real_nsec()-start_time))/1.0e9, work_time/1.0e9 
); fprintf( fileout, "%.5e\t", ((double)(read_end_time-read_start_time))/1.0e9 ); for(i=0; i<num_events; i++) { event_value_union.ll = values[i]; fprintf( fileout, "%.3f\t", event_value_union.dbl ); } fprintf( fileout, "\n" ); } retval = PAPI_stop( EventSet, values); return 0; }
bt_onefile.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - BT This benchmark is an OpenMP C version of the NPB BT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: R. Van der Wijngaart T. Harris M. Yarrow OpenMP C version: S. Satoh --------------------------------------------------------------------*/ /* #include "npb-C.h" */ /* NAS Parallel Benchmarks 2.3 OpenMP C Versions */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> /* #include <omp.h> */ /* #include "wtime.h" */ /* C/Fortran interface is different on different machines. * You may need to tweak this. */ #if defined(IBM) #define wtime wtime #elif defined(CRAY) #define wtime WTIME #else #define wtime wtime_ #endif void wtime(double *t) { static int sec = -1; struct timeval tv; gettimeofday(&tv, (struct timezone *)0); //gettimeofday(&tv, (void *)0); if (sec < 0) sec = tv.tv_sec; *t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec; } #if defined(_OPENMP) #include <omp.h> #endif /* _OPENMP */ typedef int boolean; typedef struct { double real; double imag; } dcomplex; #define TRUE 1 #define FALSE 0 #define max(a,b) (((a) > (b)) ? (a) : (b)) #define min(a,b) (((a) < (b)) ? 
(a) : (b)) #define pow2(a) ((a)*(a)) #define get_real(c) c.real #define get_imag(c) c.imag #define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag) #define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag) #define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \ c.imag = a.real * b.imag + a.imag * b.real) #define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b) extern double randlc(double *, double); extern void vranlc(int, double *, double, double *); extern void timer_clear(int); extern void timer_start(int); extern void timer_stop(int); extern double timer_read(int); extern void c_print_results(char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); /* global variables #include "header.h" */ /*-------------------------------------------------------------------- c--------------------------------------------------------------------- c c header.h c c--------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c The following include file is generated automatically by the c "setparams" utility. It defines c maxcells: the square root of the maximum number of processors c problem_size: 12, 64, 102, 162 (for cclass T, A, B, C) c dt_default: default time step for this problem size if no c config file c niter_default: default number of iterations for this problem size --------------------------------------------------------------------*/ /* #include "npbparams.h" */ /* cclass = S */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the cclass of the NPB c in this directory. Do not modify it by hand. 
*/ #define PROBLEM_SIZE 32 //#define PROBLEM_SIZE 12 #define NITER_DEFAULT 60 #define DT_DEFAULT 0.010 #define CONVERTDOUBLE FALSE #define COMPILETIME "05 Oct 2004" #define NPBVERSION "2.3" #define CS1 "gcc " #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-g" #define CS6 "-lm" #define CS7 "randdp" #define AA 0 #define BB 1 #define CC 2 #define BLOCK_SIZE 5 /* COMMON block: global */ static int grid_points[3]; /* grid_ponts(1:3) */ /* COMMON block: constants */ static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3; static double dx1, dx2, dx3, dx4, dx5; static double dy1, dy2, dy3, dy4, dy5; static double dz1, dz2, dz3, dz4, dz5; static double dssp, dt; static double ce[5][13]; /* ce(5,13) */ static double dxmax, dymax, dzmax; static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5; static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1; static double yycon1, yycon2, yycon3, yycon4, yycon5; static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1; static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5; static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1; static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345; static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp; static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2; static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6; static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16; #define IMAX PROBLEM_SIZE #define JMAX PROBLEM_SIZE #define KMAX PROBLEM_SIZE /* c to improve cache performance, grid dimensions padded by 1 c for even number sizes only. 
*/ /* COMMON block: fields */ static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1]; static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1]; static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1]; static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1]; static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1]; static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1]; static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1]; static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5]; static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5]; static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5]; /* COMMON block: work_1d */ static double cuf[PROBLEM_SIZE]; static double q[PROBLEM_SIZE]; static double ue[PROBLEM_SIZE][5]; static double buf[PROBLEM_SIZE][5]; #pragma omp threadprivate(cuf, q, ue, buf) /* COMMON block: work_lhs */ static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5]; /* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */ static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5]; /* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */ static double tmp1, tmp2, tmp3; /* c to improve cache performance, grid dimensions (first two for these c to arrays) padded by 1 for even number sizes only. 
*/

/* common block fjac, njac */

/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
                           double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *cclass, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
                       double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);

/*--------------------------------------------------------------------
      program BT
c-------------------------------------------------------------------*/
/* Driver: reads run parameters (from inputbt.data if present, else
   compiled defaults), performs one untimed warm-up time step to touch
   all code paths, then runs `niter` ADI time steps under timer 1 and
   reports verification status and the MFLOP/s rate. */
int main(int argc, char **argv) {

  int niter, step, n3;
  int nthreads = 1;        /* overwritten inside the parallel region when OpenMP is on */
  double navg, mflops;
  double tmax;             /* wall-clock seconds of the timed section */
  boolean verified;
  char cclass;             /* problem class letter, set by verify() */
  FILE *fp;

/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/

  printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
         " - BT Benchmark\n\n");

  fp = fopen("inputbt.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputbt.data");
    fscanf(fp, "%d", &niter);
    while (fgetc(fp) != '\n');   /* consume the rest of the line */
    fscanf(fp, "%lg", &dt);
    while (fgetc(fp) != '\n');
    /* NOTE(review): fscanf/fgetc results are unchecked; a malformed
       input file leaves niter/dt/grid_points partially set. */
    fscanf(fp, "%d%d%d",
           &grid_points[0], &grid_points[1],
           &grid_points[2]);
    fclose(fp);
  } else {
    printf(" No input file inputbt.data. Using compiled defaults\n");
    niter = NITER_DEFAULT;
    dt = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }

  printf(" Size: %3dx%3dx%3d\n",
         grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

  /* The statically allocated arrays are sized IMAX/JMAX/KMAX; refuse
     a requested grid that would overrun them. */
  if (grid_points[0] > IMAX || grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();

  /* Warm-up pass: run every phase once, then restore the initial state
     so the timed loop starts from scratch. */
#pragma omp parallel
{
  initialize();
  lhsinit();
  exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();
  initialize();
} /* end parallel */

  timer_clear(1);
  timer_start(1);

#pragma omp parallel firstprivate(niter) private(step)
{
  for (step = 1; step <= niter; step++) {
    if (step%20 == 0 || step == 1) {
#pragma omp master
      printf(" Time step %4d\n", step);
    }
    adi();
  }

#if defined(_OPENMP)
  /* Only the master thread records the team size for the report. */
#pragma omp master
  nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &cclass, &verified);

  n3 = grid_points[0]*grid_points[1]*grid_points[2];
  navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
  if ( tmax != 0.0 ) {
    /* Operation-count model for BT from the NPB report. */
    mflops = 1.0e-6*(double)niter*
      (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", cclass, grid_points[0],
                  grid_points[1], grid_points[2], niter, nthreads,
                  tmax, mflops, " floating point",
                  verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
                  CS6, "(none)");
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/* add(): u := u + rhs at every interior grid point.  The body
   continues on the next line of this file. */
static void add(void) {
/*--------------------------------------------------------------------
c     addition of update to the vector u
c-------------------------------------------------------------------*/

  /* (continuation of add(), whose header ends the previous line of
     this file) */
  int i, j, k, m;

  /* j, k, m must be listed private: with a bare `omp for` they would
     be shared by default and the threads would race on them. */
#pragma omp for private(i,j,k,m)
/* #pragma omp for ,by Liao, j k m will be shared by default, race condtion*/
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* adi: one Alternating-Direction-Implicit time step -- build the
   right-hand side, solve the block systems along x, y and z in turn,
   then fold the update into u. */
static void adi(void) {
  compute_rhs();
  x_solve();
  y_solve();
  z_solve();
  add();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
c     this function computes the norm of the difference between the
c     computed solution and the exact solution
c-------------------------------------------------------------------*/

  int i, j, k, m, d;
  /* NOTE: local `add` shadows the add() function above. */
  double xi, eta, zeta, u_exact[5], add;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  /* Accumulate squared error of each of the 5 components over the
     full grid (boundaries included). */
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, u_exact);
        for (m = 0; m < 5; m++) {
          add = u[i][j][k][m] - u_exact[m];
          rms[m] = rms[m] + add*add;
        }
      }
    }
  }

  /* Normalize by the interior extent in each direction, then sqrt. */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] = rms[m] / (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* rhs_norm: RMS norm of the rhs array over the interior points
   (body continues on the next line of this file). */
static void rhs_norm(double rms[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

  /* (continuation of rhs_norm()) */
  int i, j, k, d, m;
  double add;   /* scratch copy of one rhs entry */

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  /* Accumulate the squared rhs values over interior points only. */
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          add = rhs[i][j][k][m];
          rms[m] = rms[m] + add*add;
        }
      }
    }
  }

  /* Normalize by the interior extent in each direction, then sqrt. */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] = rms[m] / (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void exact_rhs(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     compute the right hand side based on exact solution
c-------------------------------------------------------------------*/

  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c     initialize
c-------------------------------------------------------------------*/
#pragma omp for private(i,j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          forcing[i][j][k][m] = 0.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c     xi-direction flux differences
c-------------------------------------------------------------------*/
  /* ue, buf, cuf, q are threadprivate (declared so near the top of the
     file): per-thread pencils holding the exact solution and derived
     quantities along one grid line. */
#pragma omp for private(k,i,m,eta,zeta,dtpp,im1,ip1,xi)
  for (j = 1; j < grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      for (i = 0; i < grid_points[0]; i++) {
        xi = (double)i * dnxm1;

        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];   /* 1/density: converts momenta to velocities */

        for (m = 1; m <= 4; m++)
{ buf[i][m] = dtpp * dtemp[m]; } cuf[i] = buf[i][1] * buf[i][1]; buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3]; q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] + buf[i][3]*ue[i][3]); } for (i = 1; i < grid_points[0]-1; i++) { im1 = i-1; ip1 = i+1; forcing[i][j][k][0] = forcing[i][j][k][0] - tx2*(ue[ip1][1]-ue[im1][1])+ dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]); forcing[i][j][k][1] = forcing[i][j][k][1] - tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))- (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+ xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+ dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]); forcing[i][j][k][2] = forcing[i][j][k][2] - tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+ xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+ dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]); forcing[i][j][k][3] = forcing[i][j][k][3] - tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+ xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+ dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]); forcing[i][j][k][4] = forcing[i][j][k][4] - tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])- buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+ 0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+ xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+ xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+ dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { i = 1; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]); i = 2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]); } for (m = 0; m < 5; m++) { for (i = 1*3; i <= grid_points[0]-3*1-1; i++) { forcing[i][j][k][m] = forcing[i][j][k][m] - dssp* (ue[i-2][m] - 4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]); } } for (m = 0; m < 5; m++) { i = 
grid_points[0]-3; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i-2][m] - 4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m]); i = grid_points[0]-2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences c-------------------------------------------------------------------*/ #pragma omp for private(k,j,m,xi,eta,zeta,dtpp,jm1,jp1) for (i = 1; i < grid_points[0]-1; i++) { xi = (double)i * dnxm1; for (k = 1; k < grid_points[2]-1; k++) { zeta = (double)k * dnzm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[j][m] = dtemp[m]; } dtpp = 1.0/dtemp[0]; for (m = 1; m <= 4; m++) { buf[j][m] = dtpp * dtemp[m]; } cuf[j] = buf[j][2] * buf[j][2]; buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3]; q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] + buf[j][3]*ue[j][3]); } for (j = 1; j < grid_points[1]-1; j++) { jm1 = j-1; jp1 = j+1; forcing[i][j][k][0] = forcing[i][j][k][0] - ty2*( ue[jp1][2]-ue[jm1][2] )+ dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]); forcing[i][j][k][1] = forcing[i][j][k][1] - ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+ yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+ dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]); forcing[i][j][k][2] = forcing[i][j][k][2] - ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))- (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+ yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+ dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]); forcing[i][j][k][3] = forcing[i][j][k][3] - ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+ yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+ dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]); forcing[i][j][k][4] = forcing[i][j][k][4] - ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])- buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+ 
0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+ yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+ yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+ dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { j = 1; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]); j = 2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]); } for (m = 0; m < 5; m++) { for (j = 1*3; j <= grid_points[1]-3*1-1; j++) { forcing[i][j][k][m] = forcing[i][j][k][m] - dssp* (ue[j-2][m] - 4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]); } } for (m = 0; m < 5; m++) { j = grid_points[1]-3; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j-2][m] - 4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m]); j = grid_points[1]-2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences c-------------------------------------------------------------------*/ #pragma omp for private(xi,j,eta,zeta,k,m,km1,kp1,dtpp) for (i = 1; i < grid_points[0]-1; i++) { xi = (double)i * dnxm1; for (j = 1; j < grid_points[1]-1; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[k][m] = dtemp[m]; } dtpp = 1.0/dtemp[0]; for (m = 1; m <= 4; m++) { buf[k][m] = dtpp * dtemp[m]; } cuf[k] = buf[k][3] * buf[k][3]; buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2]; q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] + buf[k][3]*ue[k][3]); } for (k = 1; k < grid_points[2]-1; k++) { km1 = k-1; kp1 = k+1; forcing[i][j][k][0] = forcing[i][j][k][0] - tz2*( 
ue[kp1][3]-ue[km1][3] )+ dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]); forcing[i][j][k][1] = forcing[i][j][k][1] - tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+ zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+ dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]); forcing[i][j][k][2] = forcing[i][j][k][2] - tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+ zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+ dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]); forcing[i][j][k][3] = forcing[i][j][k][3] - tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))- (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+ zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+ dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]); forcing[i][j][k][4] = forcing[i][j][k][4] - tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])- buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+ 0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0] +buf[km1][0])+ zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+ zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+ dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { k = 1; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]); k = 2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]); } for (m = 0; m < 5; m++) { for (k = 1*3; k <= grid_points[2]-3*1-1; k++) { forcing[i][j][k][m] = forcing[i][j][k][m] - dssp* (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]); } } for (m = 0; m < 5; m++) { k = grid_points[2]-3; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m]); k = grid_points[2]-2; forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]); } } } 
/*-------------------------------------------------------------------- c now change the sign of the forcing function, c-------------------------------------------------------------------*/ #pragma omp for private(j,k,m) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m]; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact_solution(double xi, double eta, double zeta, double dtemp[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c this function returns the exact solution at point xi, eta, zeta c-------------------------------------------------------------------*/ int m; for (m = 0; m < 5; m++) { dtemp[m] = ce[m][0] + xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]))) + eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11])))+ zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + zeta*ce[m][12]))); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void initialize(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This subroutine initializes the field variable u using c tri-linear transfinite interpolation of the boundary values c-------------------------------------------------------------------*/ int i, j, k, m, ix, iy, iz; double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5]; 
/*-------------------------------------------------------------------- c Later (in compute_rhs) we compute 1/u for every element. A few of c the corner elements are not used, but it convenient (and faster) c to compute the whole thing with a simple loop. Make sure those c values are nonzero by initializing the whole thing here. c-------------------------------------------------------------------*/ #pragma omp for private(j,k,m) for (i = 0; i < IMAX; i++) { for (j = 0; j < IMAX; j++) { for (k = 0; k < IMAX; k++) { for (m = 0; m < 5; m++) { u[i][j][k][m] = 1.0; } } } } /*-------------------------------------------------------------------- c first store the "interpolated" values everywhere on the grid c-------------------------------------------------------------------*/ #pragma omp for private(xi,eta,zeta,j,k,ix,iy,iz,Pxi,m,Peta,Pzeta) for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; for (ix = 0; ix < 2; ix++) { exact_solution((double)ix, eta, zeta, &(Pface[ix][0][0])); } for (iy = 0; iy < 2; iy++) { exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]); } for (iz = 0; iz < 2; iz++) { exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]); } for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m]; u[i][j][k][m] = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta; } } } } /*-------------------------------------------------------------------- c now store the exact values on the boundaries c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c west face c-------------------------------------------------------------------*/ i = 0; xi = 0.0; #pragma omp for nowait 
private(eta,k,zeta,m) for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c east face c-------------------------------------------------------------------*/ i = grid_points[0]-1; xi = 1.0; #pragma omp for private(eta,k,zeta,m) for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c south face c-------------------------------------------------------------------*/ j = 0; eta = 0.0; #pragma omp for nowait private(xi,k,zeta,m) for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c north face c-------------------------------------------------------------------*/ j = grid_points[1]-1; eta = 1.0; #pragma omp for private(xi,k,m,zeta) for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double)k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } /*-------------------------------------------------------------------- c bottom face c-------------------------------------------------------------------*/ k = 0; zeta = 0.0; #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { xi = (double)i *dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } 
/*-------------------------------------------------------------------- c top face c-------------------------------------------------------------------*/ k = grid_points[2]-1; zeta = 1.0; #pragma omp for private(xi,eta,j,m) for (i = 0; i < grid_points[0]; i++) { xi = (double)i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double)j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[i][j][k][m] = temp[m]; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsinit(void) { int i, j, k, m, n; /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c zero the whole left hand side for starters c-------------------------------------------------------------------*/ #pragma omp for private(j,k,m,n) for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { for (n = 0; n < 5; n++) { lhs[i][j][k][0][m][n] = 0.0; lhs[i][j][k][1][m][n] = 0.0; lhs[i][j][k][2][m][n] = 0.0; } } } } } /*-------------------------------------------------------------------- c next, set all diagonal values to 1. 
This is overkill, but convenient c-------------------------------------------------------------------*/ #pragma omp for private(j,k,m) for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { lhs[i][j][k][1][m][m] = 1.0; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void lhsx(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c This function computes the left hand side in the xi-direction c-------------------------------------------------------------------*/ int i, j, k; /*-------------------------------------------------------------------- c determine a (labeled f) and n jacobians c-------------------------------------------------------------------*/ #pragma omp for private(k,i,tmp1,tmp2,tmp3) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (i = 0; i < grid_points[0]; i++) { tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ fjac[ i][ j][ k][0][0] = 0.0; fjac[ i][ j][ k][0][1] = 1.0; fjac[ i][ j][ k][0][2] = 0.0; fjac[ i][ j][ k][0][3] = 0.0; fjac[ i][ j][ k][0][4] = 0.0; fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1]) + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2; fjac[i][j][k][1][1] = ( 2.0 - c2 ) * ( u[i][j][k][1] / u[i][j][k][0] ); fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 ); fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 ); fjac[i][j][k][1][4] = c2; fjac[i][j][k][2][0] = - ( 
u[i][j][k][1]*u[i][j][k][2] ) * tmp2; fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1; fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1; fjac[i][j][k][2][3] = 0.0; fjac[i][j][k][2][4] = 0.0; fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1; fjac[i][j][k][3][2] = 0.0; fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1; fjac[i][j][k][3][4] = 0.0; fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 - c1 * ( u[i][j][k][4] * tmp1 ) ) * ( u[i][j][k][1] * tmp1 ); fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * ( 3.0*u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] ) * tmp2; fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] ) * tmp2; fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 ); njac[i][j][k][0][0] = 0.0; njac[i][j][k][0][1] = 0.0; njac[i][j][k][0][2] = 0.0; njac[i][j][k][0][3] = 0.0; njac[i][j][k][0][4] = 0.0; njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1]; njac[i][j][k][1][1] = con43 * c3c4 * tmp1; njac[i][j][k][1][2] = 0.0; njac[i][j][k][1][3] = 0.0; njac[i][j][k][1][4] = 0.0; njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2]; njac[i][j][k][2][1] = 0.0; njac[i][j][k][2][2] = c3c4 * tmp1; njac[i][j][k][2][3] = 0.0; njac[i][j][k][2][4] = 0.0; njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3]; njac[i][j][k][3][1] = 0.0; njac[i][j][k][3][2] = 0.0; njac[i][j][k][3][3] = c3c4 * tmp1; njac[i][j][k][3][4] = 0.0; njac[i][j][k][4][0] = - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3])) - c1345 * tmp2 * u[i][j][k][4]; njac[i][j][k][4][1] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][1]; njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2]; njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * 
u[i][j][k][3]; njac[i][j][k][4][4] = ( c1345 ) * tmp1; } /*-------------------------------------------------------------------- c now jacobians set, so form left hand side in x direction c-------------------------------------------------------------------*/ for (i = 1; i < grid_points[0]-1; i++) { tmp1 = dt * tx1; tmp2 = dt * tx2; lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0] - tmp1 * njac[i-1][j][k][0][0] - tmp1 * dx1; lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1] - tmp1 * njac[i-1][j][k][0][1]; lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2] - tmp1 * njac[i-1][j][k][0][2]; lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3] - tmp1 * njac[i-1][j][k][0][3]; lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4] - tmp1 * njac[i-1][j][k][0][4]; lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0] - tmp1 * njac[i-1][j][k][1][0]; lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1] - tmp1 * njac[i-1][j][k][1][1] - tmp1 * dx2; lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2] - tmp1 * njac[i-1][j][k][1][2]; lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3] - tmp1 * njac[i-1][j][k][1][3]; lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4] - tmp1 * njac[i-1][j][k][1][4]; lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0] - tmp1 * njac[i-1][j][k][2][0]; lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1] - tmp1 * njac[i-1][j][k][2][1]; lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2] - tmp1 * njac[i-1][j][k][2][2] - tmp1 * dx3; lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3] - tmp1 * njac[i-1][j][k][2][3]; lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4] - tmp1 * njac[i-1][j][k][2][4]; lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0] - tmp1 * njac[i-1][j][k][3][0]; lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1] - tmp1 * njac[i-1][j][k][3][1]; lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2] - tmp1 * njac[i-1][j][k][3][2]; lhs[i][j][k][AA][3][3] = - tmp2 * 
fjac[i-1][j][k][3][3] - tmp1 * njac[i-1][j][k][3][3] - tmp1 * dx4; lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4] - tmp1 * njac[i-1][j][k][3][4]; lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0] - tmp1 * njac[i-1][j][k][4][0]; lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1] - tmp1 * njac[i-1][j][k][4][1]; lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2] - tmp1 * njac[i-1][j][k][4][2]; lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3] - tmp1 * njac[i-1][j][k][4][3]; lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4] - tmp1 * njac[i-1][j][k][4][4] - tmp1 * dx5; lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1; lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1]; lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2]; lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3]; lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4]; lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0]; lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2; lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2]; lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3]; lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4]; lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0]; lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1]; lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3; lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3]; lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4]; lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0]; lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1]; lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2]; lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4; lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4]; lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0]; lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * 
njac[i][j][k][4][1]; lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2]; lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3]; lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5; lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0] - tmp1 * njac[i+1][j][k][0][0] - tmp1 * dx1; lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1] - tmp1 * njac[i+1][j][k][0][1]; lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2] - tmp1 * njac[i+1][j][k][0][2]; lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3] - tmp1 * njac[i+1][j][k][0][3]; lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4] - tmp1 * njac[i+1][j][k][0][4]; lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0] - tmp1 * njac[i+1][j][k][1][0]; lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1] - tmp1 * njac[i+1][j][k][1][1] - tmp1 * dx2; lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2] - tmp1 * njac[i+1][j][k][1][2]; lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3] - tmp1 * njac[i+1][j][k][1][3]; lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4] - tmp1 * njac[i+1][j][k][1][4]; lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0] - tmp1 * njac[i+1][j][k][2][0]; lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1] - tmp1 * njac[i+1][j][k][2][1]; lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2] - tmp1 * njac[i+1][j][k][2][2] - tmp1 * dx3; lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3] - tmp1 * njac[i+1][j][k][2][3]; lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4] - tmp1 * njac[i+1][j][k][2][4]; lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0] - tmp1 * njac[i+1][j][k][3][0]; lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1] - tmp1 * njac[i+1][j][k][3][1]; lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2] - tmp1 * njac[i+1][j][k][3][2]; lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3] - tmp1 * njac[i+1][j][k][3][3] - tmp1 * dx4; lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4] - tmp1 * njac[i+1][j][k][3][4]; 
/* lhsx (tail): final CC(4,*) row of the x-direction assembly, then close
   the k/j/i loops and the enclosing function (opened before this chunk). */
        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0] - tmp1 * njac[i+1][j][k][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1] - tmp1 * njac[i+1][j][k][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2] - tmp1 * njac[i+1][j][k][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3] - tmp1 * njac[i+1][j][k][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4] - tmp1 * njac[i+1][j][k][4][4] - tmp1 * dx5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c lhsy: form the block-tridiagonal left hand side for the y-sweep.
c For each cell three 5x5 blocks of lhs are filled:
c   AA = coupling to (j-1), BB = diagonal, CC = coupling to (j+1).
c Pass 1 evaluates the flux Jacobian (fjac) and the n-Jacobian (njac)
c from the conserved variables u; pass 2 combines them, weighted by
c dt*ty1 / dt*ty2 and the dy* coefficients, into lhs.
c-------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c NOTE: j runs over the FULL 0..grid_points[1]-1 range here because the
c assembly pass below reads the Jacobians at j-1 and j+1.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,tmp1,tmp2,tmp3)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        /* tmp1 = 1/u[..][0] (reciprocal density, cf. rho_inv in
           compute_rhs); tmp2, tmp3 are its square and cube. */
        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        fjac[ i][ j][ k][0][0] = 0.0;
        fjac[ i][ j][ k][0][1] = 0.0;
        fjac[ i][ j][ k][0][2] = 1.0;
        fjac[ i][ j][ k][0][3] = 0.0;
        fjac[ i][ j][ k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][3] = 0.0;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
          + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
                            + u[i][j][k][2] * u[i][j][k][2]
                            + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][2][1] = - c2 *  u[i][j][k][1] * tmp1;
        fjac[i][j][k][2][2] = ( 2.0 - c2 ) *  u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
        fjac[i][j][k][2][4] = c2;

        fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][3][1] = 0.0;
        fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][3][4] = 0.0;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * u[i][j][k][4] * tmp1 )
          * u[i][j][k][2] * tmp1;
        fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] * tmp2;
        fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
          - 0.50 * c2 * ( (  u[i][j][k][1]*u[i][j][k][1]
                             + 3.0 * u[i][j][k][2]*u[i][j][k][2]
                             + u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;

        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] =   c3c4 * tmp1;
        njac[i][j][k][1][2] =   0.0;
        njac[i][j][k][1][3] =   0.0;
        njac[i][j][k][1][4] =   0.0;

        /* y-momentum row carries the con43 (4/3) factor in this sweep */
        njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] =   0.0;
        njac[i][j][k][2][2] =   con43 * c3c4 * tmp1;
        njac[i][j][k][2][3] =   0.0;
        njac[i][j][k][2][4] =   0.0;

        njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] =   0.0;
        njac[i][j][k][3][2] =   0.0;
        njac[i][j][k][3][3] =   c3c4 * tmp1;
        njac[i][j][k][3][4] =   0.0;

        njac[i][j][k][4][0] = - (  c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = (  c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }
    }
  }

/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,tmp1,tmp2)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        /* tmp1/tmp2 reused here as dt-scaled weights for njac / fjac */
        tmp1 = dt * ty1;
        tmp2 = dt * ty2;

        /* AA: sub-diagonal block, built from the (j-1) Jacobians */
        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0] - tmp1 * njac[i][j-1][k][0][0] - tmp1 * dy1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1] - tmp1 * njac[i][j-1][k][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2] - tmp1 * njac[i][j-1][k][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3] - tmp1 * njac[i][j-1][k][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4] - tmp1 * njac[i][j-1][k][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0] - tmp1 * njac[i][j-1][k][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1] - tmp1 * njac[i][j-1][k][1][1] - tmp1 * dy2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2] - tmp1 * njac[i][j-1][k][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3] - tmp1 * njac[i][j-1][k][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4] - tmp1 * njac[i][j-1][k][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0] - tmp1 * njac[i][j-1][k][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1] - tmp1 * njac[i][j-1][k][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2] - tmp1 * njac[i][j-1][k][2][2] - tmp1 * dy3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3] - tmp1 * njac[i][j-1][k][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4] - tmp1 * njac[i][j-1][k][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0] - tmp1 * njac[i][j-1][k][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1] - tmp1 * njac[i][j-1][k][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2] - tmp1 * njac[i][j-1][k][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3] - tmp1 * njac[i][j-1][k][3][3] - tmp1 * dy4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4] - tmp1 * njac[i][j-1][k][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0] - tmp1 * njac[i][j-1][k][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1] - tmp1 * njac[i][j-1][k][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2] - tmp1 * njac[i][j-1][k][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3] - tmp1 * njac[i][j-1][k][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4] - tmp1 * njac[i][j-1][k][4][4] - tmp1 * dy5;

        /* BB: diagonal block; identity plus 2*dt*ty1-scaled local njac */
        lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5;

        /* CC: super-diagonal block, built from the (j+1) Jacobians */
        lhs[i][j][k][CC][0][0] =  tmp2 * fjac[i][j+1][k][0][0] - tmp1 * njac[i][j+1][k][0][0] - tmp1 * dy1;
        lhs[i][j][k][CC][0][1] =  tmp2 * fjac[i][j+1][k][0][1] - tmp1 * njac[i][j+1][k][0][1];
        lhs[i][j][k][CC][0][2] =  tmp2 * fjac[i][j+1][k][0][2] - tmp1 * njac[i][j+1][k][0][2];
        lhs[i][j][k][CC][0][3] =  tmp2 * fjac[i][j+1][k][0][3] - tmp1 * njac[i][j+1][k][0][3];
        lhs[i][j][k][CC][0][4] =  tmp2 * fjac[i][j+1][k][0][4] - tmp1 * njac[i][j+1][k][0][4];

        lhs[i][j][k][CC][1][0] =  tmp2 * fjac[i][j+1][k][1][0] - tmp1 * njac[i][j+1][k][1][0];
        lhs[i][j][k][CC][1][1] =  tmp2 * fjac[i][j+1][k][1][1] - tmp1 * njac[i][j+1][k][1][1] - tmp1 * dy2;
        lhs[i][j][k][CC][1][2] =  tmp2 * fjac[i][j+1][k][1][2] - tmp1 * njac[i][j+1][k][1][2];
        lhs[i][j][k][CC][1][3] =  tmp2 * fjac[i][j+1][k][1][3] - tmp1 * njac[i][j+1][k][1][3];
        lhs[i][j][k][CC][1][4] =  tmp2 * fjac[i][j+1][k][1][4] - tmp1 * njac[i][j+1][k][1][4];

        lhs[i][j][k][CC][2][0] =  tmp2 * fjac[i][j+1][k][2][0] - tmp1 * njac[i][j+1][k][2][0];
        lhs[i][j][k][CC][2][1] =  tmp2 * fjac[i][j+1][k][2][1] - tmp1 * njac[i][j+1][k][2][1];
        lhs[i][j][k][CC][2][2] =  tmp2 * fjac[i][j+1][k][2][2] - tmp1 * njac[i][j+1][k][2][2] - tmp1 * dy3;
        lhs[i][j][k][CC][2][3] =  tmp2 * fjac[i][j+1][k][2][3] - tmp1 * njac[i][j+1][k][2][3];
        lhs[i][j][k][CC][2][4] =  tmp2 * fjac[i][j+1][k][2][4] - tmp1 * njac[i][j+1][k][2][4];

        lhs[i][j][k][CC][3][0] =  tmp2 * fjac[i][j+1][k][3][0] - tmp1 * njac[i][j+1][k][3][0];
        lhs[i][j][k][CC][3][1] =  tmp2 * fjac[i][j+1][k][3][1] - tmp1 * njac[i][j+1][k][3][1];
        lhs[i][j][k][CC][3][2] =  tmp2 * fjac[i][j+1][k][3][2] - tmp1 * njac[i][j+1][k][3][2];
        lhs[i][j][k][CC][3][3] =  tmp2 * fjac[i][j+1][k][3][3] - tmp1 * njac[i][j+1][k][3][3] - tmp1 * dy4;
        lhs[i][j][k][CC][3][4] =  tmp2 * fjac[i][j+1][k][3][4] - tmp1 * njac[i][j+1][k][3][4];

        lhs[i][j][k][CC][4][0] =  tmp2 * fjac[i][j+1][k][4][0] - tmp1 * njac[i][j+1][k][4][0];
        lhs[i][j][k][CC][4][1] =  tmp2 * fjac[i][j+1][k][4][1] - tmp1 * njac[i][j+1][k][4][1];
        lhs[i][j][k][CC][4][2] =  tmp2 * fjac[i][j+1][k][4][2] - tmp1 * njac[i][j+1][k][4][2];
        lhs[i][j][k][CC][4][3] =  tmp2 * fjac[i][j+1][k][4][3] - tmp1 * njac[i][j+1][k][4][3];
        lhs[i][j][k][CC][4][4] =  tmp2 * fjac[i][j+1][k][4][4] - tmp1 * njac[i][j+1][k][4][4] - tmp1 * dy5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c lhsz: same structure as lhsy, but sweeping in the z direction:
c AA couples to (k-1), BB is the diagonal, CC couples to (k+1).
c-------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c NOTE: k runs over the FULL 0..grid_points[2]-1 range here because the
c assembly pass below reads the Jacobians at k-1 and k+1.
c---------------------------------------------------------------------*/
#pragma omp for private(j,k,tmp1,tmp2,tmp3)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        /* tmp1 = 1/u[..][0] (reciprocal density); tmp2/tmp3 its powers */
        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 0.0;
        fjac[i][j][k][0][2] = 0.0;
        fjac[i][j][k][0][3] = 1.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][1][2] = 0.0;
        fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
fjac[i][j][k][2][1] = 0.0; fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1; fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1; fjac[i][j][k][2][4] = 0.0; fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 ) + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 ); fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1; fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1; fjac[i][j][k][3][3] = ( 2.0 - c2 ) * u[i][j][k][3] * tmp1; fjac[i][j][k][3][4] = c2; fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 - c1 * ( u[i][j][k][4] * tmp1 ) ) * ( u[i][j][k][3] * tmp1 ); fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2; fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 ) - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + 3.0*u[i][j][k][3]*u[i][j][k][3] ) * tmp2 ); fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1; njac[i][j][k][0][0] = 0.0; njac[i][j][k][0][1] = 0.0; njac[i][j][k][0][2] = 0.0; njac[i][j][k][0][3] = 0.0; njac[i][j][k][0][4] = 0.0; njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1]; njac[i][j][k][1][1] = c3c4 * tmp1; njac[i][j][k][1][2] = 0.0; njac[i][j][k][1][3] = 0.0; njac[i][j][k][1][4] = 0.0; njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2]; njac[i][j][k][2][1] = 0.0; njac[i][j][k][2][2] = c3c4 * tmp1; njac[i][j][k][2][3] = 0.0; njac[i][j][k][2][4] = 0.0; njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3]; njac[i][j][k][3][1] = 0.0; njac[i][j][k][3][2] = 0.0; njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1; njac[i][j][k][3][4] = 0.0; njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2])) - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3])) - c1345 * tmp2 * u[i][j][k][4]; njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * 
u[i][j][k][1]; njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2]; njac[i][j][k][4][3] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][3]; njac[i][j][k][4][4] = ( c1345 )* tmp1; } } } /*-------------------------------------------------------------------- c now jacobians set, so form left hand side in z direction c-------------------------------------------------------------------*/ #pragma omp for private(j,k,tmp1,tmp2) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { tmp1 = dt * tz1; tmp2 = dt * tz2; lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0] - tmp1 * njac[i][j][k-1][0][0] - tmp1 * dz1; lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1] - tmp1 * njac[i][j][k-1][0][1]; lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2] - tmp1 * njac[i][j][k-1][0][2]; lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3] - tmp1 * njac[i][j][k-1][0][3]; lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4] - tmp1 * njac[i][j][k-1][0][4]; lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0] - tmp1 * njac[i][j][k-1][1][0]; lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1] - tmp1 * njac[i][j][k-1][1][1] - tmp1 * dz2; lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2] - tmp1 * njac[i][j][k-1][1][2]; lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3] - tmp1 * njac[i][j][k-1][1][3]; lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4] - tmp1 * njac[i][j][k-1][1][4]; lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0] - tmp1 * njac[i][j][k-1][2][0]; lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1] - tmp1 * njac[i][j][k-1][2][1]; lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2] - tmp1 * njac[i][j][k-1][2][2] - tmp1 * dz3; lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3] - tmp1 * njac[i][j][k-1][2][3]; lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4] - tmp1 * njac[i][j][k-1][2][4]; lhs[i][j][k][AA][3][0] = - tmp2 * 
fjac[i][j][k-1][3][0] - tmp1 * njac[i][j][k-1][3][0]; lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1] - tmp1 * njac[i][j][k-1][3][1]; lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2] - tmp1 * njac[i][j][k-1][3][2]; lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3] - tmp1 * njac[i][j][k-1][3][3] - tmp1 * dz4; lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4] - tmp1 * njac[i][j][k-1][3][4]; lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0] - tmp1 * njac[i][j][k-1][4][0]; lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1] - tmp1 * njac[i][j][k-1][4][1]; lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2] - tmp1 * njac[i][j][k-1][4][2]; lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3] - tmp1 * njac[i][j][k-1][4][3]; lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4] - tmp1 * njac[i][j][k-1][4][4] - tmp1 * dz5; lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1; lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1]; lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2]; lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3]; lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4]; lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0]; lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2; lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2]; lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3]; lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4]; lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0]; lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1]; lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3; lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3]; lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4]; lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0]; lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1]; lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * 
njac[i][j][k][3][2]; lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4; lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4]; lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0]; lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1]; lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2]; lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3]; lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5; lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0] - tmp1 * njac[i][j][k+1][0][0] - tmp1 * dz1; lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1] - tmp1 * njac[i][j][k+1][0][1]; lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2] - tmp1 * njac[i][j][k+1][0][2]; lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3] - tmp1 * njac[i][j][k+1][0][3]; lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4] - tmp1 * njac[i][j][k+1][0][4]; lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0] - tmp1 * njac[i][j][k+1][1][0]; lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1] - tmp1 * njac[i][j][k+1][1][1] - tmp1 * dz2; lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2] - tmp1 * njac[i][j][k+1][1][2]; lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3] - tmp1 * njac[i][j][k+1][1][3]; lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4] - tmp1 * njac[i][j][k+1][1][4]; lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0] - tmp1 * njac[i][j][k+1][2][0]; lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1] - tmp1 * njac[i][j][k+1][2][1]; lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2] - tmp1 * njac[i][j][k+1][2][2] - tmp1 * dz3; lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3] - tmp1 * njac[i][j][k+1][2][3]; lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4] - tmp1 * njac[i][j][k+1][2][4]; lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0] - tmp1 * njac[i][j][k+1][3][0]; lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1] - tmp1 * njac[i][j][k+1][3][1]; 
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2] - tmp1 * njac[i][j][k+1][3][2]; lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3] - tmp1 * njac[i][j][k+1][3][3] - tmp1 * dz4; lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4] - tmp1 * njac[i][j][k+1][3][4]; lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0] - tmp1 * njac[i][j][k+1][4][0]; lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1] - tmp1 * njac[i][j][k+1][4][1]; lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2] - tmp1 * njac[i][j][k+1][4][2]; lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3] - tmp1 * njac[i][j][k+1][4][3]; lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4] - tmp1 * njac[i][j][k+1][4][4] - tmp1 * dz5; } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void compute_rhs(void) { int i, j, k, m; double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1; /*-------------------------------------------------------------------- c compute the reciprocal of density, and the kinetic energy, c and the speed of sound. 
c-------------------------------------------------------------------*/ #pragma omp for nowait private(j,k,rho_inv) for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { rho_inv = 1.0/u[i][j][k][0]; rho_i[i][j][k] = rho_inv; us[i][j][k] = u[i][j][k][1] * rho_inv; vs[i][j][k] = u[i][j][k][2] * rho_inv; ws[i][j][k] = u[i][j][k][3] * rho_inv; square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * rho_inv; qs[i][j][k] = square[i][j][k] * rho_inv; } } } /*-------------------------------------------------------------------- c copy the exact forcing term to the right hand side; because c this forcing term is known, we can store it on the whole grid c including the boundary c-------------------------------------------------------------------*/ #pragma omp for private(j,k,m) for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = forcing[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c compute xi-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for private(j,k,uijk,up1,um1) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { uijk = us[i][j][k]; up1 = us[i+1][j][k]; um1 = us[i-1][j][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + u[i-1][j][k][0]) - tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]); rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + u[i-1][j][k][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[i+1][j][k][1]*up1 - u[i-1][j][k][1]*um1 + (u[i+1][j][k][4]- square[i+1][j][k]- u[i-1][j][k][4]+ square[i-1][j][k])* c2); rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i+1][j][k][2] - 2.0*u[i][j][k][2] + 
u[i-1][j][k][2]) + xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + vs[i-1][j][k]) - tx2 * (u[i+1][j][k][2]*up1 - u[i-1][j][k][2]*um1); rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i+1][j][k][3] - 2.0*u[i][j][k][3] + u[i-1][j][k][3]) + xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) - tx2 * (u[i+1][j][k][3]*up1 - u[i-1][j][k][3]*um1); rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i+1][j][k][4] - 2.0*u[i][j][k][4] + u[i-1][j][k][4]) + xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i-1][j][k][4]*rho_i[i-1][j][k]) - tx2 * ( (c1*u[i+1][j][k][4] - c2*square[i+1][j][k])*up1 - (c1*u[i-1][j][k][4] - c2*square[i-1][j][k])*um1 ); } } } /*-------------------------------------------------------------------- c add fourth order xi-direction dissipation c-------------------------------------------------------------------*/ i = 1; #pragma omp for nowait private(k,m) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } i = 2; #pragma omp for nowait private(k,m) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } #pragma omp for nowait private(j,k,m) for (i = 3; i < grid_points[0]-3; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m] ); } } } } i = grid_points[0]-3; #pragma omp for nowait private(k,m) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { 
for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] ); } } } i = grid_points[0]-2; #pragma omp for private(k,m) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] + 5.0*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute eta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for private(j,k,vijk,vp1,vm1) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { vijk = vs[i][j][k]; vp1 = vs[i][j+1][k]; vm1 = vs[i][j-1][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + u[i][j-1][k][0]) - ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]); rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + u[i][j-1][k][1]) + yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) - ty2 * (u[i][j+1][k][1]*vp1 - u[i][j-1][k][1]*vm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + u[i][j-1][k][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[i][j+1][k][2]*vp1 - u[i][j-1][k][2]*vm1 + (u[i][j+1][k][4] - square[i][j+1][k] - u[i][j-1][k][4] + square[i][j-1][k]) *c2); rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + u[i][j-1][k][3]) + yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) - ty2 * (u[i][j+1][k][3]*vp1 - u[i][j-1][k][3]*vm1); rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + u[i][j-1][k][4]) + yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + 
u[i][j-1][k][4]*rho_i[i][j-1][k]) - ty2 * ((c1*u[i][j+1][k][4] - c2*square[i][j+1][k]) * vp1 - (c1*u[i][j-1][k][4] - c2*square[i][j-1][k]) * vm1); } } } /*-------------------------------------------------------------------- c add fourth order eta-direction dissipation c-------------------------------------------------------------------*/ j = 1; #pragma omp for nowait private(k,m) for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } j = 2; #pragma omp for nowait private(k,m) for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } #pragma omp for nowait private(j,k,m) for (i = 1; i < grid_points[0]-1; i++) { for (j = 3; j < grid_points[1]-3; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m] ); } } } } j = grid_points[1]-3; #pragma omp for nowait private(k,m) for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] ); } } } j = grid_points[1]-2; #pragma omp for private(k,m) for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] + 5.*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute zeta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for 
private(j,k,wijk,wp1,wm1) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { wijk = ws[i][j][k]; wp1 = ws[i][j][k+1]; wm1 = ws[i][j][k-1]; rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k+1][0] - 2.0*u[i][j][k][0] + u[i][j][k-1][0]) - tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]); rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + u[i][j][k-1][1]) + zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) - tz2 * (u[i][j][k+1][1]*wp1 - u[i][j][k-1][1]*wm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + u[i][j][k-1][2]) + zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) - tz2 * (u[i][j][k+1][2]*wp1 - u[i][j][k-1][2]*wm1); rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + u[i][j][k-1][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[i][j][k+1][3]*wp1 - u[i][j][k-1][3]*wm1 + (u[i][j][k+1][4] - square[i][j][k+1] - u[i][j][k-1][4] + square[i][j][k-1]) *c2); rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + u[i][j][k-1][4]) + zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j][k-1][4]*rho_i[i][j][k-1]) - tz2 * ( (c1*u[i][j][k+1][4] - c2*square[i][j][k+1])*wp1 - (c1*u[i][j][k-1][4] - c2*square[i][j][k-1])*wm1); } } } /*-------------------------------------------------------------------- c add fourth order zeta-direction dissipation c-------------------------------------------------------------------*/ k = 1; #pragma omp for nowait private(j,m) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } k = 2; #pragma omp for nowait private(j,m) for 
(i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } #pragma omp for nowait private(j,k,m) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 3; k < grid_points[2]-3; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m] ); } } } } k = grid_points[2]-3; #pragma omp for nowait private(j,m) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] ); } } } k = grid_points[2]-2; #pragma omp for private(j,m) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 5.0*u[i][j][k][m] ); } } } #pragma omp for private(k,m,i) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { for (i = 1; i < grid_points[0]-1; i++) { rhs[i][j][k][m] = rhs[i][j][k][m] * dt; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void set_constants(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 0.5; ce[0][7] = 0.02; ce[0][8] = 0.01; ce[0][9] = 0.03; ce[0][10] = 0.5; ce[0][11] = 0.4; ce[0][12] = 0.3; ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] 
= 2.0;  /* continues the ce[][] coefficient table begun on the previous line */
  ce[1][6] = 3.0; ce[1][7] = 0.01; ce[1][8] = 0.03; ce[1][9] = 0.02;
  ce[1][10] = 0.4; ce[1][11] = 0.3; ce[1][12] = 0.5;

  /* rows 2..4 of ce[][] (coefficients consumed elsewhere; semantics not
     visible in this file section) */
  ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0;
  ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 0.04; ce[2][8] = 0.03; ce[2][9] = 0.05;
  ce[2][10] = 0.3; ce[2][11] = 0.5; ce[2][12] = 0.4;

  ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0;
  ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 0.03; ce[3][8] = 0.05; ce[3][9] = 0.04;
  ce[3][10] = 0.2; ce[3][11] = 0.1; ce[3][12] = 0.3;

  ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 0.1;
  ce[4][5] = 0.4; ce[4][6] = 0.3; ce[4][7] = 0.05; ce[4][8] = 0.04; ce[4][9] = 0.03;
  ce[4][10] = 0.1; ce[4][11] = 0.3; ce[4][12] = 0.2;

  /* basic flow constants */
  c1 = 1.4; c2 = 0.4; c3 = 0.1; c4 = 1.0; c5 = 1.4;

  /* reciprocal mesh spacings in each direction */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);

  /* frequently used products of the basic constants */
  c1c2 = c1 * c2; c1c5 = c1 * c5; c3c4 = c3 * c4; c1345 = c1c5 * c3c4;
  conz1 = (1.0-c1c5);

  /* 1/h^2, 1/(2h), 1/h factors per direction */
  tx1 = 1.0 / (dnxm1 * dnxm1); tx2 = 1.0 / (2.0 * dnxm1); tx3 = 1.0 / dnxm1;
  ty1 = 1.0 / (dnym1 * dnym1); ty2 = 1.0 / (2.0 * dnym1); ty3 = 1.0 / dnym1;
  tz1 = 1.0 / (dnzm1 * dnzm1); tz2 = 1.0 / (2.0 * dnzm1); tz3 = 1.0 / dnzm1;

  /* per-equation diffusion coefficients */
  dx1 = 0.75; dx2 = 0.75; dx3 = 0.75; dx4 = 0.75; dx5 = 0.75;
  dy1 = 0.75; dy2 = 0.75; dy3 = 0.75; dy4 = 0.75; dy5 = 0.75;
  dz1 = 1.0; dz2 = 1.0; dz3 = 1.0; dz4 = 1.0; dz5 = 1.0;

  dxmax = max(dx3, dx4); dymax = max(dy2, dy4); dzmax = max(dz2, dz3);

  /* fourth-order artificial dissipation coefficient */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );
  c4dssp = 4.0 * dssp; c5dssp = 5.0 * dssp;

  /* dt-scaled factors */
  dttx1 = dt*tx1; dttx2 = dt*tx2;
  dtty1 = dt*ty1; dtty2 = dt*ty2;
  dttz1 = dt*tz1; dttz2 = dt*tz2;

  c2dttx1 = 2.0*dttx1; c2dtty1 = 2.0*dtty1; c2dttz1 = 2.0*dttz1;

  dtdssp = dt*dssp;
  comz1 = dtdssp; comz4 = 4.0*dtdssp; comz5 = 5.0*dtdssp; comz6 = 6.0*dtdssp;

  c3c4tx3 = c3c4*tx3; c3c4ty3 = c3c4*ty3; c3c4tz3 = c3c4*tz3;

  dx1tx1 = dx1*tx1; dx2tx1 = dx2*tx1; dx3tx1 = dx3*tx1;
  dx4tx1 = dx4*tx1; dx5tx1 = dx5*tx1;
  dy1ty1 = dy1*ty1; dy2ty1 = dy2*ty1; dy3ty1 = dy3*ty1;
  dy4ty1 = dy4*ty1; dy5ty1 = dy5*ty1;
  dz1tz1 = dz1*tz1; dz2tz1 = dz2*tz1; dz3tz1 = dz3*tz1;
  dz4tz1 = dz4*tz1; dz5tz1 = dz5*tz1;

  c2iv = 2.5; con43 = 4.0/3.0; con16 = 1.0/6.0;

  /* viscous-term coefficients per direction */
  xxcon1 = c3c4tx3*con43*tx3; xxcon2 = c3c4tx3*tx3; xxcon3 = c3c4tx3*conz1*tx3;
  xxcon4 = c3c4tx3*con16*tx3; xxcon5 = c3c4tx3*c1c5*tx3;

  yycon1 = c3c4ty3*con43*ty3; yycon2 = c3c4ty3*ty3; yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3; yycon5 = c3c4ty3*c1c5*ty3;

  zzcon1 = c3c4tz3*con43*tz3; zzcon2 = c3c4tz3*tz3; zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3; zzcon5 = c3c4tz3*c1c5*tz3;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  verification routine: compares residual and error norms against
c  stored reference values for the known problem classes
c-------------------------------------------------------------------*/

  double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
    epsilon, xce[5], xcr[5], dtref;
  int m;

/*--------------------------------------------------------------------
c   tolerance level
c-------------------------------------------------------------------*/
  epsilon = 1.0e-08;

/*--------------------------------------------------------------------
c   compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);

  /* residual norms are reported per unit time step */
  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }

  /* default: unknown class until a reference set below matches */
  *cclass = 'U';
  *verified = TRUE;

  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

/*--------------------------------------------------------------------
c    reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c-------------------------------------------------------------------*/
  if (grid_points[0] == 12 && grid_points[1] == 12 &&
      grid_points[2] == 12 && no_time_steps == 60) {

    *cclass = 'S';
    dtref = 1.0e-2;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.7034283709541311e-01;
    xcrref[1] = 1.2975252070034097e-02;
    xcrref[2] = 3.2527926989486055e-02;
    xcrref[3] = 2.6436421275166801e-02;
    xcrref[4] = 1.9211784131744430e-01;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.9976913345811579e-04;
    xceref[1] = 4.5195666782961927e-05;
    xceref[2] = 7.3973765172921357e-05;
    xceref[3] = 7.3821238632439731e-05;
    xceref[4] = 8.9269630987491446e-04;

/*--------------------------------------------------------------------
c    reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 24 && grid_points[1] == 24 &&
             grid_points[2] == 24 && no_time_steps == 200) {

    *cclass = 'W';
    dtref = 0.8e-3;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.1125590409344e+03;
    xcrref[1] = 0.1180007595731e+02;
    xcrref[2] = 0.2710329767846e+02;
    xcrref[3] = 0.2469174937669e+02;
    xcrref[4] = 0.2638427874317e+03;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.4419655736008e+01;
    xceref[1] = 0.4638531260002e+00;
    xceref[2] = 0.1011551749967e+01;
    xceref[3] = 0.9235878729944e+00;
    xceref[4] = 0.1018045837718e+02;

/*--------------------------------------------------------------------
c    reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 64 && grid_points[1] == 64 &&
             grid_points[2] == 64 && no_time_steps == 200) {

    *cclass = 'A';
    dtref = 0.8e-3;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.0806346714637264e+02;
    xcrref[1] = 1.1319730901220813e+01;
    xcrref[2] = 2.5974354511582465e+01;
    xcrref[3] = 2.3665622544678910e+01;
    xcrref[4] = 2.5278963211748344e+02;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.2348416040525025e+00;
    xceref[1] = 4.4390282496995698e-01;
    xceref[2] = 9.6692480136345650e-01;
    xceref[3] = 8.8302063039765474e-01;
    xceref[4] = 9.7379901770829278e+00;

/*--------------------------------------------------------------------
c    reference data for 102X102X102 grids after 200 time steps,
c    with DT = 3.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 102 && grid_points[1] == 102 &&
             grid_points[2] == 102 && no_time_steps == 200) {

    *cclass = 'B';
    dtref = 3.0e-4;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.4233597229287254e+03;
    xcrref[1] = 9.9330522590150238e+01;
    xcrref[2] = 3.5646025644535285e+02;
    xcrref[3] = 3.2485447959084092e+02;
    xcrref[4] = 3.2707541254659363e+03;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 5.2969847140936856e+01;
    xceref[1] = 4.4632896115670668e+00;
    xceref[2] = 1.3122573342210174e+01;
    xceref[3] = 1.2006925323559144e+01;
    xceref[4] = 1.2459576151035986e+02;

/*--------------------------------------------------------------------
c    reference data for 162X162X162 grids after 200 time steps,
c    with DT = 1.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 162 && grid_points[1] == 162 &&
             grid_points[2] == 162 && no_time_steps == 200) {

    *cclass = 'C';
    dtref = 1.0e-4;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.62398116551764615e+04;
    xcrref[1] = 0.50793239190423964e+03;
    xcrref[2] = 0.15423530093013596e+04;
    xcrref[3] = 0.13302387929291190e+04;
    xcrref[4] = 0.11604087428436455e+05;

/*--------------------------------------------------------------------
c    Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.16462008369091265e+03;
    xceref[1] = 0.11497107903824313e+02;
    xceref[2] = 0.41207446207461508e+02;
    xceref[3] = 0.37087651059694167e+02;
    xceref[4] = 0.36211053051841265e+03;

  } else {
    /* grid size / step count matches no stored class */
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c    verification test for residuals if gridsize is either 12X12X12 or
c    64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c    Compute the difference of solution values and the known reference values.
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }

/*--------------------------------------------------------------------
c    Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
  if (*cclass != 'U') {
    printf(" Verification being performed for cclass %1c\n", *cclass);
    printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
    /* time step must match the reference run for the class to be valid */
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *cclass = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown cclass\n");
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }
  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf(" %2d%20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m, xcr[m], xcrref[m], xcrdif[m]);
    } else {
      printf(" %2d%20.13e%20.13e%20.13e\n",
             m, xcr[m], xcrref[m], xcrdif[m]);
    }
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }
  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf(" %2d%20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m, xce[m], xceref[m], xcedif[m]);
    } else {
      printf(" %2d%20.13e%20.13e%20.13e\n",
             m, xce[m], xceref[m], xcedif[m]);
    }
  }

  if (*cclass == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified == TRUE) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c
c     Performs line solves in X direction by first factoring
c     the block-tridiagonal matrix into an upper triangular matrix,
c     and then performing back substitution to solve for the unknown
c     vectors of each line.
c
c     Make sure we treat elements zero to cell_size in the direction
c     of the sweep.
c
c-------------------------------------------------------------------*/

  lhsx();
  x_solve_cell();
  x_backsubstitute();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     back solve: if last cell, then generate U(isize)=rhs(isize)
c     else assume U(isize) is loaded in un pack backsub_info
c     so just use it
c     after call u(istart) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, m, n;

  /* backward sweep in i: each plane uses the already-solved plane i+1 */
  for (i = grid_points[0]-2; i >= 0; i--) {
#pragma omp for private(k,m,n)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[i][j][k][m] = rhs[i][j][k][m]
              - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];
          }
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c     performs Gaussian elimination on this cell.
c
c     assumes that unpacking routines for non-first cells
c     preload C' and rhs' from previous cell.
c c assumed send happens outside this routine, but that c c'(IMAX) and rhs'(IMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i,j,k,isize; isize = grid_points[0]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for private(k) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(0,j,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[0][j][k][BB], lhs[0][j][k][CC], rhs[0][j][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (i = 1; i < isize; i++) { #pragma omp for private ( k) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(i) = rhs(i) - A*rhs(i-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i-1][j][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(i) = B(i) - C(i-1)*A(i) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i-1][j][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for 
private (k) for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(isize) = rhs(isize) - A*rhs(isize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[isize][j][k][AA], rhs[isize-1][j][k], rhs[isize][j][k]); /*-------------------------------------------------------------------- c B(isize) = B(isize) - C(isize-1)*A(isize) c-------------------------------------------------------------------*/ matmul_sub(lhs[isize][j][k][AA], lhs[isize-1][j][k][CC], lhs[isize][j][k][BB]); /*-------------------------------------------------------------------- c multiply rhs() by b_inverse() and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][k][BB], rhs[i][j][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts bvec=bvec - ablock*avec c-------------------------------------------------------------------*/ int i; for (i = 0; i < 5; i++) { /*-------------------------------------------------------------------- c rhs(i,ic,jc,kc,ccell) = rhs(i,ic,jc,kc,ccell) c $ - lhs[i,1,ablock,ia,ja,ka,acell)* c-------------------------------------------------------------------*/ bvec[i] = bvec[i] - ablock[i][0]*avec[0] - ablock[i][1]*avec[1] - ablock[i][2]*avec[2] - ablock[i][3]*avec[3] - ablock[i][4]*avec[4]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matmul_sub(double ablock[5][5], double 
bblock[5][5], double cblock[5][5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     subtracts a(i,j,k) X b(i,j,k) from c(i,j,k)
c-------------------------------------------------------------------*/

  int j;

  /* one column of the 5x5 product per iteration; rows fully unrolled */
  for (j = 0; j < 5; j++) {
    cblock[0][j] = cblock[0][j] - ablock[0][0]*bblock[0][j]
      - ablock[0][1]*bblock[1][j]
      - ablock[0][2]*bblock[2][j]
      - ablock[0][3]*bblock[3][j]
      - ablock[0][4]*bblock[4][j];
    cblock[1][j] = cblock[1][j] - ablock[1][0]*bblock[0][j]
      - ablock[1][1]*bblock[1][j]
      - ablock[1][2]*bblock[2][j]
      - ablock[1][3]*bblock[3][j]
      - ablock[1][4]*bblock[4][j];
    cblock[2][j] = cblock[2][j] - ablock[2][0]*bblock[0][j]
      - ablock[2][1]*bblock[1][j]
      - ablock[2][2]*bblock[2][j]
      - ablock[2][3]*bblock[3][j]
      - ablock[2][4]*bblock[4][j];
    cblock[3][j] = cblock[3][j] - ablock[3][0]*bblock[0][j]
      - ablock[3][1]*bblock[1][j]
      - ablock[3][2]*bblock[2][j]
      - ablock[3][3]*bblock[3][j]
      - ablock[3][4]*bblock[4][j];
    cblock[4][j] = cblock[4][j] - ablock[4][0]*bblock[0][j]
      - ablock[4][1]*bblock[1][j]
      - ablock[4][2]*bblock[2][j]
      - ablock[4][3]*bblock[3][j]
      - ablock[4][4]*bblock[4][j];
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     Fully unrolled Gauss-Jordan elimination of the 5x5 block lhs;
c     every row operation is mirrored on the coupling block c and the
c     right-hand side r.  No pivot-magnitude check is performed
c     (NOTE(review): assumes the diagonal entries never vanish).
c-------------------------------------------------------------------*/

  double pivot, coeff;

  /* --- normalize row 0 --- */
  pivot = 1.00/lhs[0][0];
  lhs[0][1] = lhs[0][1]*pivot; lhs[0][2] = lhs[0][2]*pivot;
  lhs[0][3] = lhs[0][3]*pivot; lhs[0][4] = lhs[0][4]*pivot;
  c[0][0] = c[0][0]*pivot; c[0][1] = c[0][1]*pivot; c[0][2] = c[0][2]*pivot;
  c[0][3] = c[0][3]*pivot; c[0][4] = c[0][4]*pivot;
  r[0] = r[0]*pivot;

  /* eliminate column 0 from rows 1..4 */
  coeff = lhs[1][0];
  lhs[1][1]= lhs[1][1] - coeff*lhs[0][1]; lhs[1][2]= lhs[1][2] - coeff*lhs[0][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[0][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[0][4];
  c[1][0] = c[1][0] - coeff*c[0][0]; c[1][1] = c[1][1] - coeff*c[0][1];
  c[1][2] = c[1][2] - coeff*c[0][2]; c[1][3] = c[1][3] - coeff*c[0][3];
  c[1][4] = c[1][4] - coeff*c[0][4];
  r[1] = r[1] - coeff*r[0];

  coeff = lhs[2][0];
  lhs[2][1]= lhs[2][1] - coeff*lhs[0][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[0][2];
  lhs[2][3]= lhs[2][3] - coeff*lhs[0][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[0][4];
  c[2][0] = c[2][0] - coeff*c[0][0]; c[2][1] = c[2][1] - coeff*c[0][1];
  c[2][2] = c[2][2] - coeff*c[0][2]; c[2][3] = c[2][3] - coeff*c[0][3];
  c[2][4] = c[2][4] - coeff*c[0][4];
  r[2] = r[2] - coeff*r[0];

  coeff = lhs[3][0];
  lhs[3][1]= lhs[3][1] - coeff*lhs[0][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[0][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[0][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[0][4];
  c[3][0] = c[3][0] - coeff*c[0][0]; c[3][1] = c[3][1] - coeff*c[0][1];
  c[3][2] = c[3][2] - coeff*c[0][2]; c[3][3] = c[3][3] - coeff*c[0][3];
  c[3][4] = c[3][4] - coeff*c[0][4];
  r[3] = r[3] - coeff*r[0];

  coeff = lhs[4][0];
  lhs[4][1]= lhs[4][1] - coeff*lhs[0][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[0][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[0][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[0][4];
  c[4][0] = c[4][0] - coeff*c[0][0]; c[4][1] = c[4][1] - coeff*c[0][1];
  c[4][2] = c[4][2] - coeff*c[0][2]; c[4][3] = c[4][3] - coeff*c[0][3];
  c[4][4] = c[4][4] - coeff*c[0][4];
  r[4] = r[4] - coeff*r[0];

  /* --- normalize row 1 --- */
  pivot = 1.00/lhs[1][1];
  lhs[1][2] = lhs[1][2]*pivot; lhs[1][3] = lhs[1][3]*pivot;
  lhs[1][4] = lhs[1][4]*pivot;
  c[1][0] = c[1][0]*pivot; c[1][1] = c[1][1]*pivot; c[1][2] = c[1][2]*pivot;
  c[1][3] = c[1][3]*pivot; c[1][4] = c[1][4]*pivot;
  r[1] = r[1]*pivot;

  /* eliminate column 1 from rows 0, 2, 3, 4 */
  coeff = lhs[0][1];
  lhs[0][2]= lhs[0][2] - coeff*lhs[1][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[1][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[1][4];
  c[0][0] = c[0][0] - coeff*c[1][0]; c[0][1] = c[0][1] - coeff*c[1][1];
  c[0][2] = c[0][2] - coeff*c[1][2]; c[0][3] = c[0][3] - coeff*c[1][3];
  c[0][4] = c[0][4] - coeff*c[1][4];
  r[0] = r[0] - coeff*r[1];

  coeff = lhs[2][1];
  lhs[2][2]= lhs[2][2] - coeff*lhs[1][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[1][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[1][4];
  c[2][0] = c[2][0] - coeff*c[1][0]; c[2][1] = c[2][1] - coeff*c[1][1];
  c[2][2] = c[2][2] - coeff*c[1][2]; c[2][3] = c[2][3] - coeff*c[1][3];
  c[2][4] = c[2][4] - coeff*c[1][4];
  r[2] = r[2] - coeff*r[1];

  coeff = lhs[3][1];
  lhs[3][2]= lhs[3][2] - coeff*lhs[1][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[1][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[1][4];
  c[3][0] = c[3][0] - coeff*c[1][0]; c[3][1] = c[3][1] - coeff*c[1][1];
  c[3][2] = c[3][2] - coeff*c[1][2]; c[3][3] = c[3][3] - coeff*c[1][3];
  c[3][4] = c[3][4] - coeff*c[1][4];
  r[3] = r[3] - coeff*r[1];

  coeff = lhs[4][1];
  lhs[4][2]= lhs[4][2] - coeff*lhs[1][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[1][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[1][4];
  c[4][0] = c[4][0] - coeff*c[1][0]; c[4][1] = c[4][1] - coeff*c[1][1];
  c[4][2] = c[4][2] - coeff*c[1][2]; c[4][3] = c[4][3] - coeff*c[1][3];
  c[4][4] = c[4][4] - coeff*c[1][4];
  r[4] = r[4] - coeff*r[1];

  /* --- normalize row 2 --- */
  pivot = 1.00/lhs[2][2];
  lhs[2][3] = lhs[2][3]*pivot; lhs[2][4] = lhs[2][4]*pivot;
  c[2][0] = c[2][0]*pivot; c[2][1] = c[2][1]*pivot; c[2][2] = c[2][2]*pivot;
  c[2][3] = c[2][3]*pivot; c[2][4] = c[2][4]*pivot;
  r[2] = r[2]*pivot;

  /* eliminate column 2 from rows 0, 1, 3, 4 */
  coeff = lhs[0][2];
  lhs[0][3]= lhs[0][3] - coeff*lhs[2][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[2][4];
  c[0][0] = c[0][0] - coeff*c[2][0]; c[0][1] = c[0][1] - coeff*c[2][1];
  c[0][2] = c[0][2] - coeff*c[2][2]; c[0][3] = c[0][3] - coeff*c[2][3];
  c[0][4] = c[0][4] - coeff*c[2][4];
  r[0] = r[0] - coeff*r[2];

  coeff = lhs[1][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[2][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[2][4];
  c[1][0] = c[1][0] - coeff*c[2][0]; c[1][1] = c[1][1] - coeff*c[2][1];
  c[1][2] = c[1][2] - coeff*c[2][2]; c[1][3] = c[1][3] - coeff*c[2][3];
  c[1][4] = c[1][4] - coeff*c[2][4];
  r[1] = r[1] - coeff*r[2];

  coeff = lhs[3][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[2][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[2][4];
  c[3][0] = c[3][0] - coeff*c[2][0]; c[3][1] = c[3][1] - coeff*c[2][1];
  c[3][2] = c[3][2] - coeff*c[2][2]; c[3][3] = c[3][3] - coeff*c[2][3];
  c[3][4] = c[3][4] - coeff*c[2][4];
  r[3] = r[3] - coeff*r[2];

  coeff = lhs[4][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[2][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[2][4];
  c[4][0] = c[4][0] - coeff*c[2][0]; c[4][1] = c[4][1] - coeff*c[2][1];
  c[4][2] = c[4][2] - coeff*c[2][2]; c[4][3] = c[4][3] - coeff*c[2][3];
  c[4][4] = c[4][4] - coeff*c[2][4];
  r[4] = r[4] - coeff*r[2];

  /* --- normalize row 3 --- */
  pivot = 1.00/lhs[3][3];
  lhs[3][4] = lhs[3][4]*pivot;
  c[3][0] = c[3][0]*pivot; c[3][1] = c[3][1]*pivot; c[3][2] = c[3][2]*pivot;
  c[3][3] = c[3][3]*pivot; c[3][4] = c[3][4]*pivot;
  r[3] = r[3]*pivot;

  /* eliminate column 3 from rows 0, 1, 2, 4 */
  coeff = lhs[0][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[3][4];
  c[0][0] = c[0][0] - coeff*c[3][0]; c[0][1] = c[0][1] - coeff*c[3][1];
  c[0][2] = c[0][2] - coeff*c[3][2]; c[0][3] = c[0][3] - coeff*c[3][3];
  c[0][4] = c[0][4] - coeff*c[3][4];
  r[0] = r[0] - coeff*r[3];

  coeff = lhs[1][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[3][4];
  c[1][0] = c[1][0] - coeff*c[3][0]; c[1][1] = c[1][1] - coeff*c[3][1];
  c[1][2] = c[1][2] - coeff*c[3][2]; c[1][3] = c[1][3] - coeff*c[3][3];
  c[1][4] = c[1][4] - coeff*c[3][4];
  r[1] = r[1] - coeff*r[3];

  coeff = lhs[2][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[3][4];
  c[2][0] = c[2][0] - coeff*c[3][0]; c[2][1] = c[2][1] - coeff*c[3][1];
  c[2][2] = c[2][2] - coeff*c[3][2]; c[2][3] = c[2][3] - coeff*c[3][3];
  c[2][4] = c[2][4] - coeff*c[3][4];
  r[2] = r[2] - coeff*r[3];

  coeff = lhs[4][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[3][4];
  c[4][0] = c[4][0] - coeff*c[3][0]; c[4][1] = c[4][1] - coeff*c[3][1];
  c[4][2] = c[4][2] - coeff*c[3][2]; c[4][3] = c[4][3] - coeff*c[3][3];
  c[4][4] = c[4][4] - coeff*c[3][4];
  r[4] = r[4] - coeff*r[3];

  /* --- normalize row 4 --- */
  pivot = 1.00/lhs[4][4];
  c[4][0] = c[4][0]*pivot; c[4][1] = c[4][1]*pivot; c[4][2] = c[4][2]*pivot;
  c[4][3] = c[4][3]*pivot; c[4][4] = c[4][4]*pivot;
  r[4] = r[4]*pivot;

  /* eliminate column 4 from rows 0..3 (only c and r remain affected) */
  coeff = lhs[0][4];
  c[0][0] = c[0][0] - coeff*c[4][0]; c[0][1] = c[0][1] - coeff*c[4][1];
  c[0][2] = c[0][2] - coeff*c[4][2]; c[0][3] = c[0][3] - coeff*c[4][3];
  c[0][4] = c[0][4] - coeff*c[4][4];
  r[0] = r[0] - coeff*r[4];

  coeff = lhs[1][4];
  c[1][0] = c[1][0] - coeff*c[4][0]; c[1][1] = c[1][1] - coeff*c[4][1];
  c[1][2] = c[1][2] - coeff*c[4][2]; c[1][3] = c[1][3] - coeff*c[4][3];
  c[1][4] = c[1][4] - coeff*c[4][4];
  r[1] = r[1] - coeff*r[4];

  coeff = lhs[2][4];
  c[2][0] = c[2][0] - coeff*c[4][0]; c[2][1] = c[2][1] - coeff*c[4][1];
  c[2][2] = c[2][2] - coeff*c[4][2]; c[2][3] = c[2][3] - coeff*c[4][3];
  c[2][4] = c[2][4] - coeff*c[4][4];
  r[2] = r[2] - coeff*r[4];

  coeff = lhs[3][4];
  c[3][0] = c[3][0] - coeff*c[4][0]; c[3][1] = c[3][1] - coeff*c[4][1];
  c[3][2] = c[3][2] - coeff*c[4][2]; c[3][3] = c[3][3] - coeff*c[4][3];
  c[3][4] = c[3][4] - coeff*c[4][4];
  r[3] = r[3] - coeff*r[4];
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvrhs( double lhs[5][5], double r[5] ) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     Same unrolled Gauss-Jordan elimination as binvcrhs, but with the
c     row operations applied only to the right-hand side r (no coupling
c     block).  No pivot-magnitude check is performed.
c-------------------------------------------------------------------*/

  double pivot, coeff;

  /* --- normalize row 0 --- */
  pivot = 1.00/lhs[0][0];
  lhs[0][1] = lhs[0][1]*pivot; lhs[0][2] = lhs[0][2]*pivot;
  lhs[0][3] = lhs[0][3]*pivot; lhs[0][4] = lhs[0][4]*pivot;
  r[0] = r[0]*pivot;

  /* eliminate column 0 from rows 1..4 */
  coeff = lhs[1][0];
  lhs[1][1]= lhs[1][1] - coeff*lhs[0][1]; lhs[1][2]= lhs[1][2] - coeff*lhs[0][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[0][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[0][4];
  r[1] = r[1] - coeff*r[0];

  coeff = lhs[2][0];
  lhs[2][1]= lhs[2][1] - coeff*lhs[0][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[0][2];
  lhs[2][3]= lhs[2][3] - coeff*lhs[0][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[0][4];
  r[2] = r[2] - coeff*r[0];

  coeff = lhs[3][0];
  lhs[3][1]= lhs[3][1] - coeff*lhs[0][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[0][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[0][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[0][4];
  r[3] = r[3] - coeff*r[0];

  coeff = lhs[4][0];
  lhs[4][1]= lhs[4][1] - coeff*lhs[0][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[0][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[0][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[0][4];
  r[4] = r[4] - coeff*r[0];

  /* --- normalize row 1 --- */
  pivot = 1.00/lhs[1][1];
  lhs[1][2] = lhs[1][2]*pivot; lhs[1][3] = lhs[1][3]*pivot;
  lhs[1][4] = lhs[1][4]*pivot;
  r[1] = r[1]*pivot;

  /* eliminate column 1 from rows 0, 2, 3, 4 */
  coeff = lhs[0][1];
  lhs[0][2]= lhs[0][2] - coeff*lhs[1][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[1][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[1][4];
  r[0] = r[0] - coeff*r[1];

  coeff = lhs[2][1];
  lhs[2][2]= lhs[2][2] - coeff*lhs[1][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[1][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[1][4];
  r[2] = r[2] - coeff*r[1];

  coeff = lhs[3][1];
  lhs[3][2]= lhs[3][2] - coeff*lhs[1][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[1][3];
  lhs[3][4]= lhs[3][4] - coeff*lhs[1][4];
  r[3] = r[3] - coeff*r[1];

  coeff = lhs[4][1];
  lhs[4][2]= lhs[4][2] - coeff*lhs[1][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[1][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[1][4];
  r[4] = r[4] - coeff*r[1];

  /* --- normalize row 2 --- */
  pivot = 1.00/lhs[2][2];
  lhs[2][3] = lhs[2][3]*pivot; lhs[2][4] = lhs[2][4]*pivot;
  r[2] = r[2]*pivot;

  /* eliminate column 2 from rows 0, 1, 3, 4 */
  coeff = lhs[0][2];
  lhs[0][3]= lhs[0][3] - coeff*lhs[2][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[2][4];
  r[0] = r[0] - coeff*r[2];

  coeff = lhs[1][2];
  lhs[1][3]= lhs[1][3] - coeff*lhs[2][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[2][4];
  r[1] = r[1] - coeff*r[2];

  coeff = lhs[3][2];
  lhs[3][3]= lhs[3][3] - coeff*lhs[2][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[2][4];
  r[3] = r[3] - coeff*r[2];

  coeff = lhs[4][2];
  lhs[4][3]= lhs[4][3] - coeff*lhs[2][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[2][4];
  r[4] = r[4] - coeff*r[2];

  /* --- normalize row 3 --- */
  pivot = 1.00/lhs[3][3];
  lhs[3][4] = lhs[3][4]*pivot;
  r[3] = r[3]*pivot;

  /* eliminate column 3 from rows 0, 1, 2, 4 */
  coeff = lhs[0][3];
  lhs[0][4]= lhs[0][4] - coeff*lhs[3][4];
  r[0] = r[0] - coeff*r[3];

  coeff = lhs[1][3];
  lhs[1][4]= lhs[1][4] - coeff*lhs[3][4];
  r[1] = r[1] - coeff*r[3];

  coeff = lhs[2][3];
  lhs[2][4]= lhs[2][4] - coeff*lhs[3][4];
  r[2] = r[2] - coeff*r[3];

  coeff = lhs[4][3];
  lhs[4][4]= lhs[4][4] - coeff*lhs[3][4];
  r[4] = r[4] - coeff*r[3];

  /* --- normalize row 4 and back-substitute into rows 0..3 --- */
  pivot = 1.00/lhs[4][4];
  r[4] = r[4]*pivot;

  coeff = lhs[0][4];
  r[0] = r[0] - coeff*r[4];

  coeff = lhs[1][4];
  r[1] = r[1] - coeff*r[4];

  coeff = lhs[2][4];
  r[2] = r[2] - coeff*r[4];

  coeff = lhs[3][4];
  r[3] = r[3] - coeff*r[4];
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     Performs line solves in Y direction by first factoring
c     the block-tridiagonal matrix into an upper triangular matrix,
c     and then performing back substitution to solve for the unknown
c     vectors of each line.
c
c     Make sure we treat elements zero to cell_size in the direction
c     of the sweep.
c-------------------------------------------------------------------*/

  lhsy();
  y_solve_cell();
  y_backsubstitute();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_backsubstitute(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     back solve: if last cell, then generate U(jsize)=rhs(jsize)
c     else assume U(jsize) is loaded in un pack backsub_info
c     so just use it
c     after call u(jstart) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, m, n;

  /* backward sweep in j: each plane uses the already-solved plane j+1 */
  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for private(k,m,n)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[i][j][k][m] = rhs[i][j][k][m]
              - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
          }
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve_cell(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     performs Gaussian elimination on this cell.
c
c     assumes that unpacking routines for non-first cells
c     preload C' and rhs' from previous cell.
c c assumed send happens outside this routine, but that c c'(JMAX) and rhs'(JMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, jsize; jsize = grid_points[1]-1; #pragma omp for private(k) for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(i,0,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][0][k][BB], lhs[i][0][k][CC], rhs[i][0][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (j = 1; j < jsize; j++) { #pragma omp for private(k) for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c subtract A*lhs_vector(j-1) from lhs_vector(j) c c rhs(j) = rhs(j) - A*rhs(j-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i][j-1][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(j) = B(j) - C(j-1)*A(j) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i][j-1][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for private(k) for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { 
/*-------------------------------------------------------------------- c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][jsize][k][AA], rhs[i][jsize-1][k], rhs[i][jsize][k]); /*-------------------------------------------------------------------- c B(jsize) = B(jsize) - C(jsize-1)*A(jsize) c call matmul_sub(aa,i,jsize,k,c, c $ cc,i,jsize-1,k,c,BB,i,jsize,k) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][jsize][k][AA], lhs[i][jsize-1][k][CC], lhs[i][jsize][k][BB]); /*-------------------------------------------------------------------- c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][jsize][k][BB], rhs[i][jsize][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs line solves in Z direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix, c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. 
c-------------------------------------------------------------------*/ lhsz(); z_solve_cell(); z_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell, then generate U(ksize)=rhs(ksize) c else assume U(ksize) is loaded in un pack backsub_info c so just use it c after call u(kstart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; #pragma omp for private(j,k,m,n) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = grid_points[2]-2; k >= 0; k--) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_solve_cell(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. c c assumed send happens outside this routine, but that c c'(KMAX) and rhs'(KMAX) will be sent to next cell. 
c-------------------------------------------------------------------*/ int i,j,k,ksize; ksize = grid_points[2]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for private(j) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c multiply c(i,j,0) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][0][BB], lhs[i][j][0][CC], rhs[i][j][0] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (k = 1; k < ksize; k++) { #pragma omp for private(j) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c subtract A*lhs_vector(k-1) from lhs_vector(k) c c rhs(k) = rhs(k) - A*rhs(k-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i][j][k-1], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(k) = B(k) - C(k-1)*A(k) c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i][j][k-1][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } 
/*-------------------------------------------------------------------- c Now finish up special cases for last cell c-------------------------------------------------------------------*/ #pragma omp for private(j) for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][ksize][AA], rhs[i][j][ksize-1], rhs[i][j][ksize]); /*-------------------------------------------------------------------- c B(ksize) = B(ksize) - C(ksize-1)*A(ksize) c call matmul_sub(aa,i,j,ksize,c, c $ cc,i,j,ksize-1,c,BB,i,j,ksize) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][ksize][AA], lhs[i][j][ksize-1][CC], lhs[i][j][ksize][BB]); /*-------------------------------------------------------------------- c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][ksize][BB], rhs[i][j][ksize] ); } } } /*****************************************************************/ /****** C _ P R I N T _ R E S U L T S ******/ /*****************************************************************/ void c_print_results( char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand) { char *evalue="1000"; printf( "\n\n %s Benchmark Completed\n", name ); printf( " cclass = %c\n", cclass ); if( n2 == 0 && n3 == 0 ) printf( " Size = %12d\n", n1 ); /* as in IS */ else printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 ); printf( " Iterations = %12d\n", niter ); printf( " Threads = %12d\n", nthreads ); printf( " Time in seconds = %12.2f\n", t ); printf( " Mop/s total = 
%12.2f\n", mops ); printf( " Operation type = %24s\n", optype); if( passed_verification ) printf( " Verification = SUCCESSFUL\n" ); else printf( " Verification = UNSUCCESSFUL\n" ); printf( " Version = %12s\n", npbversion ); printf( " Compile date = %12s\n", compiletime ); printf( "\n Compile options:\n" ); printf( " CC = %s\n", cc ); printf( " CLINK = %s\n", clink ); printf( " C_LIB = %s\n", c_lib ); printf( " C_INC = %s\n", c_inc ); printf( " CFLAGS = %s\n", cflags ); printf( " CLINKFLAGS = %s\n", clinkflags ); printf( " RAND = %s\n", rand ); #ifdef SMP evalue = getenv("MP_SET_NUMTHREADS"); printf( " MULTICPUS = %s\n", evalue ); #endif /* printf( "\n\n" ); printf( " Please send the results of this run to:\n\n" ); printf( " NPB Development Team\n" ); printf( " Internet: npb@nas.nasa.gov\n \n" ); printf( " If email is not available, send this to:\n\n" ); printf( " MS T27A-1\n" ); printf( " NASA Ames Research Center\n" ); printf( " Moffett Field, CA 94035-1000\n\n" ); printf( " Fax: 415-604-3957\n\n" );*/ } /* Prototype */ void wtime( double * ); /*****************************************************************/ /****** E L A P S E D _ T I M E ******/ /*****************************************************************/ double elapsed_time( void ) { double t; wtime( &t ); return( t ); } double start[64], elapsed[64]; /*****************************************************************/ /****** T I M E R _ C L E A R ******/ /*****************************************************************/ void timer_clear( int n ) { elapsed[n] = 0.0; } /*****************************************************************/ /****** T I M E R _ S T A R T ******/ /*****************************************************************/ void timer_start( int n ) { start[n] = elapsed_time(); } /*****************************************************************/ /****** T I M E R _ S T O P ******/ /*****************************************************************/ void timer_stop( int n ) { double t, 
now; now = elapsed_time(); t = now - start[n]; elapsed[n] += t; } /*****************************************************************/ /****** T I M E R _ R E A D ******/ /*****************************************************************/ double timer_read( int n ) { return( elapsed[n] ); }
sornaive.c
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <omp.h>

/* Interior grid is M x M points; arrays carry an extra boundary ring (M+2 total). */
#define M 500

/* Max-norm difference between the analytic solution and u over the interior. */
double compute_error(double solution[][M + 2], double u[][M + 2], const int m);
/* SOR driver: iterates until the error vs. `solution` drops below `tol`;
 * returns the number of sweeps performed. */
int sor(double unew[][M + 2], double uold[][M + 2], double solution[][M + 2], const double omega, const double tol, const int m);

int main(void)
{
    /* Solution of Laplace's Equation.
       ==============================
       ! ***
       ! ***  Uxx + Uyy = 0
       ! ***  0 <= x <= pi, 0 <= y <= pi
       ! ***  U(x,pi) = sin(x), U(x,0) = U(0,y) = U(pi,y) = 0
       ! ***
       ! ***  then U(x,y) = (sinh(y)*sin(x)) / sinh(pi)
       ! ***
       ! ***  Should converge with
       ! ***  tol = 0.001 and M = 20 in 42 iterations.
       ! ***  and with tol = 0.001 and M = 100 in 198 iterations.
    */
    const int m = M;
    // struct timeval tic, toc;
    /* NOTE(review): three ~2 MB stack arrays (502*502 doubles each); fine for
     * typical stack limits, but consider heap allocation for larger M. */
    double unew[m + 2][m + 2] = {{ 0 }};
    double solution[m + 2][m + 2] = {{ 0 }};
    double uold[m + 2][m + 2] = {{ 0 }};
    int i, j;
    // gettimeofday(&tic, NULL);
    const double begin = omp_get_wtime();
    const double pi = 4.0 * atan(1.0);
    const double h = pi / (m + 1);   /* mesh spacing */
    #pragma omp parallel private(i, j) shared(uold, solution)
    {
        /* Top boundary U(x, pi) = sin(x); the worksharing loop's implicit
         * barrier makes these values visible to the next loop. */
        #pragma omp for schedule(static, 100)
        for(i = 0; i < m + 2; ++i)
        {
            uold[i][M + 1] = sin(i * h);
        }
        /* Initial guess: top-boundary value scaled by the y coordinate j*h. */
        #pragma omp for schedule(static, 100)
        for(i = 0; i < m + 2; ++i)
        {
            for(j = 0; j < m + 1; ++j)
            {
                uold[i][j] = j * h * uold[i][M + 1];
                // printf("%.40f\n", uold[i][j]);
            }
        }
        /* Analytic solution, used only by compute_error for the stop test. */
        #pragma omp for schedule(static, 100)
        for(i = 0; i < m + 2; ++i)
        {
            for(j = 0; j < m + 2; ++j)
            {
                solution[i][j] = sinh(j * h) * sin(i * h) / sinh(pi);
                // printf("%.40f\n", solution[i][j]);
            }
        }
    }
    /* Classic optimal over-relaxation factor for this mesh size. */
    const double omega = 2.0 / ( 1.0 + sin(pi / (m + 1)) );
    const double tol = 0.001;
    const int iters = sor(unew, uold, solution, omega, tol, m);
    // gettimeofday(&toc, NULL);
    const double end = omp_get_wtime();
    printf(" \n");
    printf(" Omega = %f\n", omega);
    printf(" It took %d iterations.\n", iters);
    // printf("Total time = %f\n\n\n", (double) (toc.tv_usec - tic.tv_usec) / 1000000 +
    //        (double) (toc.tv_sec - tic.tv_sec));
    printf("Total time = %f\n\n\n", end - begin);
    return 0;
}

/*
 * Returns the maximum absolute pointwise difference between the analytic
 * `solution` and the iterate `u` over the interior m x m grid points
 * (boundary ring excluded).
 */
double compute_error(double solution[][M + 2], double u[][M + 2], const int m)
{
    double error = 0.0;
    int i, j;
    for(i = 1; i < m + 1; ++i)
    {
        for(j = 1; j < m + 1; ++j)
        {
            const double abs_diff = fabs(solution[i][j] - u[i][j]);
            if(error < abs_diff)
            {
                error = abs_diff;
            }
        }
    }
    return error;
}

/*
 * Successive over-relaxation for the Laplace problem.
 * unew/uold: working iterates (uold holds the initial guess and boundary).
 * solution:  analytic reference used for the convergence test.
 * omega:     over-relaxation factor; tol: max-norm stopping tolerance.
 * Returns the number of sweeps performed. Sequential by design: each point
 * update reads neighbors already updated in the same sweep, so the
 * statement order must not be changed.
 */
int sor(double unew[][M + 2], double uold[][M + 2], double solution[][M + 2], const double omega, const double tol, const int m)
{
    int i, j;
    /* Copy the fixed boundary ring into unew once; it is never updated. */
    for(i = 0; i < m + 2; ++i)
    {
        unew[i][m + 1] = uold[i][m + 1];
        unew[m + 1][i] = uold[m + 1][i];
        unew[i][0] = uold[i][0];
        unew[0][i] = uold[0][i];
    }
    int iters = 0;
    double error = compute_error(solution, uold, m);
    while(error > tol)
    {
        /* SOR sweep: west (i-1,j) and south (i,j-1) neighbors come from
         * unew (already updated this sweep), the rest from uold. */
        for(i = 1; i < m + 1; ++i)
        {
            for(j = 1; j < m + 1; ++j)
            {
                unew[i][j] = uold[i][j] + 0.25 * omega *
                             (unew[i - 1][j] + unew[i][j - 1] +
                              uold[i + 1][j] + uold[i][j + 1] -
                              4.0 * uold[i][j]);
            }
        }
        /* Commit the sweep: copy interior rows back into uold. */
        for(i = 1; i < m + 1; ++i)
        {
            memcpy(&uold[i][1], &unew[i][1], m * sizeof(double));
            //for(j = 1; j < m + 1; ++j)
            //{
            //    uold[i][j] = unew[i][j];
            //}
        }
        ++iters;
        /* Convergence is only checked every 20 sweeps to amortize its cost. */
        if(iters % 20 == 0)
        {
            error = compute_error(solution, uold, m);
        }
    }
    return iters;
}
parallel-walsh.c
#include <stdlib.h>
#include <stdint.h>

/*
 * In-place fast Walsh-Hadamard transform (butterfly scheme), O(n log n).
 * vecSize is assumed to be a power of two; sizes <= 1 are already their
 * own transform, so we return immediately.
 */
void fast_parallel_walsh(int* vec, int vecSize)
{
    if(vecSize<=1) return;

    /* Half-width of each butterfly block in the current pass. */
    int partition = vecSize >> 1;
    /* Number of independent blocks processed in the current pass. */
    int sections = 1;

    while(partition>0){
        /* Every (block, offset) pair in one pass is independent, so the
         * two loops can be collapsed and split across threads. */
        #pragma omp parallel for schedule(static) collapse(2)
        for(int i=0; i<sections; i++) {
            /* Butterfly: combine each element with its partner half a
             * block away. */
            for (int j = 0; j < partition; j++) {
                int firstPart = (i<<1)*partition+j;
                int secondPart = firstPart+partition;
                int tmp = vec[firstPart];
                vec[firstPart] = tmp + vec[secondPart];
                vec[secondPart] = tmp - vec[secondPart];
            }
        }
        partition >>= 1;
        sections <<= 1;
    }
}

/* Population count (number of set bits) via the classic SWAR reduction. */
int numberOfSetBits(uint32_t i)
{
    i = i - ((i >> 1) & 0x55555555);
    i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
    return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}

/*
 * Builds column `colNum` of the vecSize x vecSize Hadamard matrix:
 * entry i is +1 when popcount(i & colNum) is even, -1 otherwise.
 * Caller owns (and must free) the returned buffer; exits on OOM.
 */
int* createCol(int colNum, int vecSize){
    int* col = (int*)malloc(sizeof(int)*vecSize);
    if(!col) exit(1);
    for(int i=0; i<vecSize; i++){
        col[i] = (numberOfSetBits(i&colNum) % 2 == 0)*2 - 1;
    }
    return col;
}

/*
 * Straightforward O(n^2) Walsh-Hadamard transform: each output element is
 * the dot product of one Hadamard row with the input vector.
 */
void simple_parallel_walsh(int* vec, int vecSize)
{
    if(vecSize<=1) return;

    /* Scratch buffer so we do not overwrite inputs still needed by later
     * rows. */
    int* tmpRes = (int*)malloc(sizeof(int)*vecSize);
    if(!tmpRes) exit(1);

    /* BUG FIX: the original used an orphaned "#pragma omp for", which does
     * not create a parallel region and therefore ran sequentially despite
     * the stated intent. "parallel for" actually spawns the team; its
     * implicit barrier at region end also makes the old orphaned
     * "#pragma omp barrier" unnecessary. */
    #pragma omp parallel for schedule(static)
    for(int i=0; i<vecSize; i++){
        tmpRes[i]=0;
        /* Row i of the (symmetric) Hadamard matrix. */
        int* col = createCol(i,vecSize);
        for(int j=0; j<vecSize; j++){
            tmpRes[i] += col[j]*vec[j];
        }
        free(col);
    }

    /* Copy the finished transform back into the caller's vector. */
    #pragma omp parallel for schedule(static)
    for(int i=0; i<vecSize; i++){
        vec[i]=tmpRes[i];
    }
    free(tmpRes);
}
comment_manager.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "mpi.h"
#include "map.h"
#include "map_iterator.h"
#include "utils.h"
#include "constants.h"
#include "types.h"
#include "comment_manager.h"
#include "comment.h"
#include "parser.h"

/* Number of comments currently tracked in the comments map; maintained by
 * comment_manager_run and the daily-decrement passes below. */
int comments_size;

/*
 * Set commented_post field to the real post id, if c is a reply to another comment
 * the value is retrieved from the parent comment which is always up to date
 */
void set_commented_post(map_t comments, comment* c) {
    if(c->commented_post == -1) {
        /* NOTE(review): assumes the parent comment is already in the map;
         * map_get returning NULL would crash on the dereference below —
         * confirm the parser guarantees replies arrive after parents. */
        comment* comment_replied = map_get(comments, c->comment_replied);
        c->commented_post = comment_replied->commented_post;
    }
}

/*
 * Removes every comment listed in comments_to_delete from the comments map
 * and frees it, then empties comments_to_delete itself.
 */
void delete_inactive_comments(map_t comments_to_delete, map_t comments) {
    void *iterator = map_it_init(comments_to_delete);
    long k;
    comment *c;
    while(map_it_hasnext(comments_to_delete, iterator)){
        k = map_it_next(comments_to_delete, &iterator);
        //printf("COMMENT MANAGER: deleting comment %ld ...\n", k);
        c = map_get(comments_to_delete, k);
        /* NOTE(review): `comments` is a by-value parameter; if map_remove
         * returns a new root, the caller's handle is NOT updated — verify
         * map.h mutates in place rather than reallocating the root. */
        comments = map_remove(comments, k);
        comment_delete(c);
        printf("COMMENT MANAGER: comment %ld deleted\n", k);
    }
    /* Same by-value caveat applies to map_empty's return value here. */
    comments_to_delete = map_empty(comments_to_delete);
    //printf("COMMENT MANAGER: delete completed\n");
}

/*
 * Sequential daily decrement: walks every comment, applies the score decay
 * owed since its last decrement, accumulates the per-post score deltas into
 * posts_to_update, and deletes comments whose score made them inactive.
 */
void comment_daily_decrement(map_t comments, map_t posts_to_update,long current_ts) {
    void *iterator = map_it_init(comments);
    long k;
    comment *c;
    map_t comments_to_delete = map_init();
    int comments_to_delete_size = 0;
    printf("COMMENT MANAGER: daily decrement started\n");
    while(map_it_hasnext(comments, iterator)){
        long lifetime, num_of_dec, pod = PERIOD_OF_DECR;
        int delta;
        k = map_it_next(comments, &iterator);
        c = map_get(comments, k);
        //time elapsed since last decrementation
        lifetime = current_ts - c->ts;
        //correct number of decrementation
        num_of_dec = lifetime / pod;
        //num of points to be subtracted to the score
        delta = (int) (num_of_dec - c->num_of_dec) * (-1);
        if(delta != 0){
            long post_id;
            post_increment *pi;
            /* comment_update_score applies delta and reports whether the
             * comment is still active (project semantics — see comment.h). */
            bool is_active = comment_update_score(c, delta, false);
            post_id = c->commented_post;
            pi = map_get(posts_to_update, post_id);
            //if the post is not in the list, it means that it is the first time that
            //an increment is recorded for the post
            if( pi == NULL ) {
                pi = (post_increment*) malloc(sizeof(post_increment));
                pi->post_id = post_id;
                pi->increment = delta;
                posts_to_update = map_put(posts_to_update, post_id, pi);
            }
            //the post is already recorded in the list, so we update the increment with
            //the one of the current comment
            else {
                pi->increment = pi->increment + delta;
            }
            if(!is_active){
                comments_to_delete = map_put(comments_to_delete, k, c);
            }
        }
    }
    //delete of the inactive posts
    comments_to_delete_size = map_size(comments_to_delete);
    printf("COMMENT MANAGER: I have to delete %d comments\n", comments_to_delete_size);
    comments_size = comments_size - comments_to_delete_size;
    delete_inactive_comments(comments_to_delete, comments);
}

/*
 * Per-comment body of the parallel daily decrement (run as an OpenMP task).
 * Computes the decay owed for `c`, then merges the delta into the shared
 * posts_to_update / comments_to_delete maps inside a critical section.
 */
void parallel_process_comment(comment *c, long current_ts, map_t *comments_to_delete, map_t *posts_to_update) {
    long lifetime, num_of_dec, pod = PERIOD_OF_DECR;
    int delta;
    //time elapsed since last decrementation
    lifetime = current_ts - c->ts;
    //correct number of decrementation
    num_of_dec = lifetime / pod;
    //num of points to be subtracted to the score
    delta = (int) (num_of_dec - c->num_of_dec) * (-1);
    if(delta != 0){
        long post_id;
        post_increment *pi;
        bool is_active = comment_update_score(c, delta, false);
        post_id = c->commented_post;
        /* All shared-map mutation is serialized; tasks only race on the
         * maps, not on individual comments (each task owns one `c`). */
        #pragma omp critical
        {
            pi = map_get(*posts_to_update, post_id);
            //if the post is not in the list, it means that it is the first time that
            //an increment is recorded for the post
            if( pi == NULL ) {
                pi = (post_increment*) malloc(sizeof(post_increment));
                pi->post_id = post_id;
                pi->increment = delta;
                *posts_to_update = map_put(*posts_to_update, post_id, pi);
            }
            //the post is already recorded in the list, so we update the increment with
            //the one of the current comment
            else {
                pi->increment = pi->increment + delta;
            }
            if(!is_active){
                *comments_to_delete = map_put(*comments_to_delete, c->comment_id, c);
            }
        }
    }
}

/*
 * Parallel counterpart of comment_daily_decrement: one thread walks the
 * iterator and spawns a task per comment; results are merged through
 * parallel_process_comment's critical section.
 */
void comment_parallel_daily_decrement(map_t comments, map_t posts_to_update, long current_ts) {
    void *iterator = map_it_init(comments);
    long k;
    comment *c;
    map_t comments_to_delete = map_init();
    int comments_to_delete_size = 0;
    printf("COMMENT MANAGER: daily decrement started\n");
    #pragma omp parallel shared (comments, posts_to_update, comments_to_delete) num_threads(NUM_OF_THREADS)
    {
        // one thread adds all tasks to the queue
        //printf("COMMENT_MANAGER: Num of threads is %d\n", omp_get_num_threads());
        #pragma omp single
        while(map_it_hasnext(comments, iterator)){
            k = map_it_next(comments, &iterator);
            c = map_get(comments, k);
            #pragma omp task firstprivate (c)
            // task is inserted in the queue and an available
            // thread will execute it
            parallel_process_comment(c, current_ts, &comments_to_delete, &posts_to_update);
        }
        /* Implicit barrier at the end of the parallel region waits for all
         * outstanding tasks before the deletions below. */
    }
    //delete of the inactive comments
    comments_to_delete_size = map_size(comments_to_delete);
    printf("COMMENT MANAGER: I have to delete %d comments\n", comments_to_delete_size);
    comments_size = comments_size - comments_to_delete_size;
    delete_inactive_comments(comments_to_delete, comments);
}

/*
 * Main loop of the comment-manager MPI rank: reads comments from `path`
 * (or stdin when path is empty), keeps the comments map up to date,
 * synchronizes timestamps with the MASTER rank, and streams score updates
 * to the POST_MANAGER rank. Sends STOP to MASTER when input is exhausted.
 */
void comment_manager_run(char *path){
    time_t stop_time = STOP;
    int count;
    ts_rank current_tr;
    MPI_Status stat;  /* NOTE(review): unused — candidate for removal. */
    map_t comments = map_init();
    map_t posts_to_update = map_init();
    struct comment* c = NULL;
    post_increment pi;
    comments_size = 0;
    FILE *file;
    if(path[0]!='\0') file = fopen(path,"r");
    else file = stdin;
    if(!file) {
        printf("Error opening file\n");
        return;
    }
    /* Assignment in the condition is intentional: loop until the parser
     * returns NULL (end of input). */
    while(c = parser_next_comment(&file)) {
        //get the commented post
        //printf("COMMENT_MANAGER: Comment read: %ld\n", c->comment_id);
        set_commented_post(comments, c);
        //save new comment in the comments map
        comments = map_put(comments, c->comment_id, c);
        comments_size++;
        printf("COMMENT_MANAGER: Comments size = %d\n", comments_size);
        // Send timestamp of latest post
        MPI_Send(&(c->ts), 1, MPI_LONG, MASTER, GENERIC_TAG, MPI_COMM_WORLD);
        // Receive current timestamp from master
        MPI_Bcast(&current_tr, 1, MPI_LONG_INT, MASTER, MPI_COMM_WORLD);
        printf("COMMENT_MANAGER: Current ts is: %ld, from process %d\n", current_tr.ts, current_tr.rank);
        //if the received timestamp is greater or equal than my ts, then compute updates and read next
        //else if it's less than my ts, wait until the post manager reaches at least my ts
        while (c->ts >= current_tr.ts && current_tr.rank != COMMENT_MANAGER ) {
            //read next timestamp
            MPI_Bcast(&current_tr, 1, MPI_LONG_INT, MASTER, MPI_COMM_WORLD);
        }
        void *iterator;
        long post_id;
        //calculate 24H decrements and fill posts_to_update list
        /* NOTE(review): posts_to_update is never emptied between loop
         * iterations, so previously sent increments appear to be re-sent on
         * the next comment — confirm whether POST_MANAGER expects cumulative
         * totals or this map should be cleared after the send loop. */
        comment_parallel_daily_decrement(comments, posts_to_update, current_tr.ts);
        // Send number of posts that need to be updated
        count = map_size(posts_to_update) + 1;
        MPI_Send(&count, 1, MPI_INT, POST_MANAGER, GENERIC_TAG, MPI_COMM_WORLD);
        iterator = map_it_init(posts_to_update);
        //send all the daily updates to post manager
        while(map_it_hasnext(posts_to_update, iterator)){
            post_increment *pi_ptr;
            post_id = map_it_next(posts_to_update, &iterator);
            pi_ptr = map_get(posts_to_update, post_id);
            pi.post_id = pi_ptr->post_id;
            pi.increment = pi_ptr->increment;
            MPI_Send(&pi, 1, MPI_LONG_INT, POST_MANAGER, DECREMENT_UPDATE, MPI_COMM_WORLD);
        }
        //send the new comment to post manager
        pi.post_id = c->commented_post;
        pi.increment = STARTING_SCORE;
        printf("COMMENT MANAGER: sending new comment for post %ld\n", pi.post_id);
        MPI_Send(&pi, 1, MPI_LONG_INT, POST_MANAGER, NEW_COMMENT_UPDATE, MPI_COMM_WORLD);
        //send the user id of the commenter
        MPI_Send(&(c->user_id), 1, MPI_LONG, POST_MANAGER, GENERIC_TAG, MPI_COMM_WORLD);
    }
    MPI_Send(&stop_time, 1, MPI_LONG, MASTER, GENERIC_TAG, MPI_COMM_WORLD);
}
ex2.c
#include <errno.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PERROR fprintf(stderr, "%s:%d: error: %s\n", __FILE__, __LINE__, strerror(errno))
#define PERROR_GOTO(label) \
    do {                   \
        PERROR;            \
        goto label;        \
    } while (0)

/* Allocate the n rows of a 2D array whose row-pointer block is already
 * allocated; jumps to `label` on any failure. Relies on `n` being in
 * scope at the expansion site. */
#define INIT_ARRAY(arr, label)                      \
    do {                                            \
        if (!(arr)) PERROR_GOTO(label);             \
        for (long i = 0; i < n; ++i) {              \
            (arr)[i] = malloc(sizeof(**(arr)) * n); \
            if (!(arr)[i]) PERROR_GOTO(label);      \
        }                                           \
    } while (0)

/* Free a (possibly partially allocated) 2D array. Rows are allocated in
 * order, so the first NULL row marks the end of what must be freed. */
void free_2d_array(int **arr, long len) {
  if (!arr) {
    return;
  }
  for (long i = 0; i < len; ++i) {
    if (!arr[i]) {
      break;
    }
    free(arr[i]);
  }
  free(arr);
}

/*
 * Multiplies two random n x n matrices with OpenMP and prints the sum of
 * all elements of the product plus the elapsed wall-clock time.
 * Usage: <prog> <n>
 */
int main(int argc, char **argv) {
  // handle input
  if (argc != 2) {
    fprintf(stderr, "Error: usage: %s <n>\n", argv[0]);
    return EXIT_FAILURE;
  }
  errno = 0;
  char *str = argv[1];
  char *endptr;
  long n = strtol(str, &endptr, 0);
  if (errno != 0) {
    perror("strtol");
    return EXIT_FAILURE;
  }
  if (endptr == str) {
    fprintf(stderr, "Error: no digits were found!\n");
    return EXIT_FAILURE;
  }
  if (n < 0) {
    fprintf(stderr, "Error: matrix size must not be negative!\n");
    return EXIT_FAILURE;
  }

  // allocate memory
  int status = EXIT_FAILURE;
  int **a = malloc(sizeof(*a) * n);
  INIT_ARRAY(a, error_a);
  int **b = malloc(sizeof(*b) * n);
  INIT_ARRAY(b, error_b);
  int **c = malloc(sizeof(*c) * n);
  INIT_ARRAY(c, error_c);
  // BUG FIX: capture the team size once. omp_get_num_threads() returns 1
  // outside a parallel region, so the old final reduction dropped every
  // thread's partial sum except thread 0's.
  int num_threads = omp_get_max_threads();
  // BUG FIX: the partial sums are accumulated with +=, so the buffer must
  // start zeroed; malloc left it uninitialized (undefined behavior).
  unsigned *local_res = calloc(num_threads, sizeof(*local_res));
  if (!local_res) PERROR_GOTO(error_c);
  status = EXIT_SUCCESS;

  // fill matrix
  srand(7);
  for (long i = 0; i < n; ++i) {
    for (long j = 0; j < n; ++j) {
      a[i][j] = rand();
      b[i][j] = rand();
      // BUG FIX: c is accumulated with += below but was never initialized.
      c[i][j] = 0;
    }
  }

  double start_time = omp_get_wtime();
#pragma omp parallel default(none) shared(n, a, b, c, local_res)
  {
    // matrix multiplication
    // BUG FIX: these were nested "#pragma omp parallel for" directives
    // inside the enclosing parallel region (creating nested teams instead
    // of sharing the work); a plain worksharing "for" splits the rows
    // across the existing team.
#pragma omp for
    for (long i = 0; i < n; ++i) {
      for (long j = 0; j < n; ++j) {
        for (long k = 0; k < n; ++k) {
          c[i][j] += a[i][k] * b[k][j];
        }
      }
    }

    // sum of matrix c into per-thread partial sums
#pragma omp for
    for (long i = 0; i < n; ++i) {
      for (long j = 0; j < n; ++j) {
        local_res[omp_get_thread_num()] += c[i][j];
      }
    }
  }
  // reduce the per-thread partial sums
  unsigned long res = 0;
  for (int l = 0; l < num_threads; ++l) {
    res += local_res[l];
  }
  double end_time = omp_get_wtime();
  printf("res: %lu, time: %2.2f seconds\n", res, end_time - start_time);

  // cleanup
  free(local_res);
error_c:
  free_2d_array(c, n);
error_b:
  free_2d_array(b, n);
error_a:
  free_2d_array(a, n);
  return status;
}
convolutiondepthwise_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 convolution, stride 1, pack-4 layout: 4 channels are
// interleaved per output pixel, so each pixel is exactly one __m128 and each
// of the nine kernel taps is a 4-lane vector applied element-wise.
// NOTE(review): the per-row pointer arithmetic (the "+= 2 * 4" tail below)
// assumes bottom_blob rows hold outw + 2 pack4 pixels, i.e. the input is
// already padded by one pixel on each side -- confirm against the caller.
static void convdw3x3s1_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias;

    // each group (one channel of a depthwise conv) is fully independent
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // per-group bias vector, or zeros when no bias blob was supplied
        __m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + g * 4) : _mm_set1_ps(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        // the three consecutive input rows feeding the current output row
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        // nine 3x3 kernel taps, each a 4-wide vector (one lane per packed channel)
        __m128 _k00 = _mm_loadu_ps(k0);
        __m128 _k01 = _mm_loadu_ps(k0 + 4);
        __m128 _k02 = _mm_loadu_ps(k0 + 8);
        __m128 _k10 = _mm_loadu_ps(k0 + 12);
        __m128 _k11 = _mm_loadu_ps(k0 + 16);
        __m128 _k12 = _mm_loadu_ps(k0 + 20);
        __m128 _k20 = _mm_loadu_ps(k0 + 24);
        __m128 _k21 = _mm_loadu_ps(k0 + 28);
        __m128 _k22 = _mm_loadu_ps(k0 + 32);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // main loop: 8 output pixels per iteration.  Consecutive outputs
            // overlap in their 3x3 input windows, so the freshly loaded _rXY
            // vectors are reused by the next _sumN; stores are interleaved
            // with loads of the following window to hide latency.
            for (; j + 7 < outw; j += 8)
            {
                __m128 _sum0 = _bias0;

                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);

                _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);

                __m128 _sum1 = _bias0;
                __m128 _r03 = _mm_loadu_ps(r0 + 12);
                __m128 _r13 = _mm_loadu_ps(r1 + 12);
                __m128 _r23 = _mm_loadu_ps(r2 + 12);

                _mm_storeu_ps(outptr0, _sum0);

                _sum1 = _mm_comp_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k22, _r23, _sum1);

                __m128 _sum2 = _bias0;
                __m128 _r04 = _mm_loadu_ps(r0 + 16);
                __m128 _r14 = _mm_loadu_ps(r1 + 16);
                __m128 _r24 = _mm_loadu_ps(r2 + 16);

                _mm_storeu_ps(outptr0 + 4, _sum1);

                _sum2 = _mm_comp_fmadd_ps(_k00, _r02, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k01, _r03, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k02, _r04, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k10, _r12, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k11, _r13, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k12, _r14, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k20, _r22, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k21, _r23, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k22, _r24, _sum2);

                __m128 _sum3 = _bias0;
                __m128 _r05 = _mm_loadu_ps(r0 + 20);
                __m128 _r15 = _mm_loadu_ps(r1 + 20);
                __m128 _r25 = _mm_loadu_ps(r2 + 20);

                _mm_storeu_ps(outptr0 + 8, _sum2);

                _sum3 = _mm_comp_fmadd_ps(_k00, _r03, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k01, _r04, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k02, _r05, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k10, _r13, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k11, _r14, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k12, _r15, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k20, _r23, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k21, _r24, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k22, _r25, _sum3);

                __m128 _sum4 = _bias0;
                __m128 _r06 = _mm_loadu_ps(r0 + 24);
                __m128 _r16 = _mm_loadu_ps(r1 + 24);
                __m128 _r26 = _mm_loadu_ps(r2 + 24);

                _mm_storeu_ps(outptr0 + 12, _sum3);

                _sum4 = _mm_comp_fmadd_ps(_k00, _r04, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k01, _r05, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k02, _r06, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k10, _r14, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k11, _r15, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k12, _r16, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k20, _r24, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k21, _r25, _sum4);
                _sum4 = _mm_comp_fmadd_ps(_k22, _r26, _sum4);

                __m128 _sum5 = _bias0;
                __m128 _r07 = _mm_loadu_ps(r0 + 28);
                __m128 _r17 = _mm_loadu_ps(r1 + 28);
                __m128 _r27 = _mm_loadu_ps(r2 + 28);

                _mm_storeu_ps(outptr0 + 16, _sum4);

                _sum5 = _mm_comp_fmadd_ps(_k00, _r05, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k01, _r06, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k02, _r07, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k10, _r15, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k11, _r16, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k12, _r17, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k20, _r25, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k21, _r26, _sum5);
                _sum5 = _mm_comp_fmadd_ps(_k22, _r27, _sum5);

                __m128 _sum6 = _bias0;
                __m128 _r08 = _mm_loadu_ps(r0 + 32);
                __m128 _r18 = _mm_loadu_ps(r1 + 32);
                __m128 _r28 = _mm_loadu_ps(r2 + 32);

                _mm_storeu_ps(outptr0 + 20, _sum5);

                _sum6 = _mm_comp_fmadd_ps(_k00, _r06, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k01, _r07, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k02, _r08, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k10, _r16, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k11, _r17, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k12, _r18, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k20, _r26, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k21, _r27, _sum6);
                _sum6 = _mm_comp_fmadd_ps(_k22, _r28, _sum6);

                __m128 _sum7 = _bias0;
                __m128 _r09 = _mm_loadu_ps(r0 + 36);
                __m128 _r19 = _mm_loadu_ps(r1 + 36);
                __m128 _r29 = _mm_loadu_ps(r2 + 36);

                _mm_storeu_ps(outptr0 + 24, _sum6);

                _sum7 = _mm_comp_fmadd_ps(_k00, _r07, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k01, _r08, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k02, _r09, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k10, _r17, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k11, _r18, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k12, _r19, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k20, _r27, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k21, _r28, _sum7);
                _sum7 = _mm_comp_fmadd_ps(_k22, _r29, _sum7);

                _mm_storeu_ps(outptr0 + 28, _sum7);

                // stride 1: advance 8 pixels = 32 floats in pack4 layout
                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr0 += 32;
            }
            // remainder: 4 output pixels at a time
            for (; j + 3 < outw; j += 4)
            {
                __m128 _sum0 = _bias0;

                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);

                _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);

                __m128 _sum1 = _bias0;
                __m128 _r03 = _mm_loadu_ps(r0 + 12);
                __m128 _r13 = _mm_loadu_ps(r1 + 12);
                __m128 _r23 = _mm_loadu_ps(r2 + 12);

                _mm_storeu_ps(outptr0, _sum0);

                _sum1 = _mm_comp_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k22, _r23, _sum1);

                __m128 _sum2 = _bias0;
                __m128 _r04 = _mm_loadu_ps(r0 + 16);
                __m128 _r14 = _mm_loadu_ps(r1 + 16);
                __m128 _r24 = _mm_loadu_ps(r2 + 16);

                _mm_storeu_ps(outptr0 + 4, _sum1);

                _sum2 = _mm_comp_fmadd_ps(_k00, _r02, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k01, _r03, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k02, _r04, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k10, _r12, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k11, _r13, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k12, _r14, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k20, _r22, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k21, _r23, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k22, _r24, _sum2);

                __m128 _sum3 = _bias0;
                __m128 _r05 = _mm_loadu_ps(r0 + 20);
                __m128 _r15 = _mm_loadu_ps(r1 + 20);
                __m128 _r25 = _mm_loadu_ps(r2 + 20);

                _mm_storeu_ps(outptr0 + 8, _sum2);

                _sum3 = _mm_comp_fmadd_ps(_k00, _r03, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k01, _r04, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k02, _r05, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k10, _r13, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k11, _r14, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k12, _r15, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k20, _r23, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k21, _r24, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k22, _r25, _sum3);

                _mm_storeu_ps(outptr0 + 12, _sum3);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 16;
            }
            // remainder: 2 output pixels at a time
            for (; j + 1 < outw; j += 2)
            {
                __m128 _sum0 = _bias0;

                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);

                _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);

                __m128 _sum1 = _bias0;
                __m128 _r03 = _mm_loadu_ps(r0 + 12);
                __m128 _r13 = _mm_loadu_ps(r1 + 12);
                __m128 _r23 = _mm_loadu_ps(r2 + 12);

                _mm_storeu_ps(outptr0, _sum0);

                _sum1 = _mm_comp_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k22, _r23, _sum1);

                _mm_storeu_ps(outptr0 + 4, _sum1);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr0 += 8;
            }
            // remainder: single output pixel
            for (; j < outw; j++)
            {
                __m128 _sum0 = _bias0;

                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);

                _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);

                _mm_storeu_ps(outptr0, _sum0);

                r0 += 4;
                r1 += 4;
                r2 += 4;
                outptr0 += 4;
            }

            // skip the 2 trailing pixels (2 * 4 floats) of the padded input
            // row to land on the start of the next row
            r0 += 2 * 4;
            r1 += 2 * 4;
            r2 += 2 * 4;
        }
    }
}

// Depthwise 3x3 convolution, stride 2, pack-4 layout.  Same tap/window
// scheme as the stride-1 variant, but consecutive outputs consume every
// second input pixel, so input pointers advance twice as fast as outptr0.
static void convdw3x3s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // floats to add at the end of a row: the unconsumed remainder of the
    // current input row (w - 2*outw pixels) plus one whole skipped row (w
    // pixels), times 4 floats per pack4 pixel
    const int tailstep = (w - 2 * outw + w) * 4;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // per-group bias vector, or zeros when no bias blob was supplied
        __m128 _bias0 = bias ? _mm_loadu_ps((const float*)bias + g * 4) : _mm_set1_ps(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        // nine 3x3 kernel taps, each a 4-wide vector
        __m128 _k00 = _mm_loadu_ps(k0);
        __m128 _k01 = _mm_loadu_ps(k0 + 4);
        __m128 _k02 = _mm_loadu_ps(k0 + 8);
        __m128 _k10 = _mm_loadu_ps(k0 + 12);
        __m128 _k11 = _mm_loadu_ps(k0 + 16);
        __m128 _k12 = _mm_loadu_ps(k0 + 20);
        __m128 _k20 = _mm_loadu_ps(k0 + 24);
        __m128 _k21 = _mm_loadu_ps(k0 + 28);
        __m128 _k22 = _mm_loadu_ps(k0 + 32);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // main loop: 4 output pixels per iteration; windows of adjacent
            // outputs still overlap by one pixel (stride 2, width 3)
            for (; j + 3 < outw; j += 4)
            {
                __m128 _sum0 = _bias0;

                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);

                _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);

                __m128 _sum1 = _bias0;
                __m128 _r03 = _mm_loadu_ps(r0 + 12);
                __m128 _r13 = _mm_loadu_ps(r1 + 12);
                __m128 _r23 = _mm_loadu_ps(r2 + 12);
                __m128 _r04 = _mm_loadu_ps(r0 + 16);
                __m128 _r14 = _mm_loadu_ps(r1 + 16);
                __m128 _r24 = _mm_loadu_ps(r2 + 16);

                _mm_storeu_ps(outptr0, _sum0);

                _sum1 = _mm_comp_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k22, _r24, _sum1);

                __m128 _sum2 = _bias0;
                __m128 _r05 = _mm_loadu_ps(r0 + 20);
                __m128 _r15 = _mm_loadu_ps(r1 + 20);
                __m128 _r25 = _mm_loadu_ps(r2 + 20);
                __m128 _r06 = _mm_loadu_ps(r0 + 24);
                __m128 _r16 = _mm_loadu_ps(r1 + 24);
                __m128 _r26 = _mm_loadu_ps(r2 + 24);

                _mm_storeu_ps(outptr0 + 4, _sum1);

                _sum2 = _mm_comp_fmadd_ps(_k00, _r04, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k01, _r05, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k02, _r06, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k10, _r14, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k11, _r15, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k12, _r16, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k20, _r24, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k21, _r25, _sum2);
                _sum2 = _mm_comp_fmadd_ps(_k22, _r26, _sum2);

                __m128 _sum3 = _bias0;
                __m128 _r07 = _mm_loadu_ps(r0 + 28);
                __m128 _r17 = _mm_loadu_ps(r1 + 28);
                __m128 _r27 = _mm_loadu_ps(r2 + 28);
                __m128 _r08 = _mm_loadu_ps(r0 + 32);
                __m128 _r18 = _mm_loadu_ps(r1 + 32);
                __m128 _r28 = _mm_loadu_ps(r2 + 32);

                _mm_storeu_ps(outptr0 + 8, _sum2);

                _sum3 = _mm_comp_fmadd_ps(_k00, _r06, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k01, _r07, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k02, _r08, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k10, _r16, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k11, _r17, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k12, _r18, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k20, _r26, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k21, _r27, _sum3);
                _sum3 = _mm_comp_fmadd_ps(_k22, _r28, _sum3);

                _mm_storeu_ps(outptr0 + 12, _sum3);

                // stride 2: input advances 8 pixels for 4 output pixels
                r0 += 2 * 16;
                r1 += 2 * 16;
                r2 += 2 * 16;
                outptr0 += 16;
            }
            // remainder: 2 output pixels at a time
            for (; j + 1 < outw; j += 2)
            {
                __m128 _sum0 = _bias0;

                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);

                _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);

                __m128 _sum1 = _bias0;
                __m128 _r03 = _mm_loadu_ps(r0 + 12);
                __m128 _r13 = _mm_loadu_ps(r1 + 12);
                __m128 _r23 = _mm_loadu_ps(r2 + 12);
                __m128 _r04 = _mm_loadu_ps(r0 + 16);
                __m128 _r14 = _mm_loadu_ps(r1 + 16);
                __m128 _r24 = _mm_loadu_ps(r2 + 16);

                _mm_storeu_ps(outptr0, _sum0);

                _sum1 = _mm_comp_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm_comp_fmadd_ps(_k22, _r24, _sum1);

                _mm_storeu_ps(outptr0 + 4, _sum1);

                r0 += 2 * 8;
                r1 += 2 * 8;
                r2 += 2 * 8;
                outptr0 += 8;
            }
            // remainder: single output pixel
            for (; j < outw; j++)
            {
                __m128 _sum0 = _bias0;

                __m128 _r00 = _mm_loadu_ps(r0);
                __m128 _r01 = _mm_loadu_ps(r0 + 4);
                __m128 _r02 = _mm_loadu_ps(r0 + 8);
                __m128 _r10 = _mm_loadu_ps(r1);
                __m128 _r11 = _mm_loadu_ps(r1 + 4);
                __m128 _r12 = _mm_loadu_ps(r1 + 8);
                __m128 _r20 = _mm_loadu_ps(r2);
                __m128 _r21 = _mm_loadu_ps(r2 + 4);
                __m128 _r22 = _mm_loadu_ps(r2 + 8);

                _sum0 = _mm_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm_comp_fmadd_ps(_k22, _r22, _sum0);

                _mm_storeu_ps(outptr0, _sum0);

                r0 += 2 * 4;
                r1 += 2 * 4;
                r2 += 2 * 4;
                outptr0 += 4;
            }

            // jump two input rows down (stride 2 vertically)
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
Calculate_RDs.c
#include "utils.h"
#include "num_of_threads.h"
#include "matrix_ops.h"

/* Forward model: complex Fourier transform (Fr + i*Fi) of one range-Doppler
 * image of the shape at the requested frequency samples. */
void Calculate_Range_Doppler(int *tlist,double *vlist,int nfac,int nvert,double *angles,double *Eo,double TIME,double *freqx,double *freqy,int nfreq,double rfreq,double *offset,double scal,double rexpe,double * Fr,double *Fi);
/* Same forward model, additionally returning the partial derivatives with
 * respect to vertex coordinates, rotation angles, offsets and exponent. */
void Calculate_Range_Doppler_deriv(int *tlist,double *vlist,int nfac,int nvert,double *angles,double *Eo,double TIME,double *freqx,double *freqy,int nfreq,double rfreq,double *offset,double scal,double rexpe,double * Fr,double *Fi,double *FTdxr,double *FTdxi,double *FTdyr,double *FTdyi,double *FTdzr,double *FTdzi,double *FTdAr,double *FTdAi,double *FTdoffr,double *FTdoffi,double *FTdexpr,double *FTdexpi);

/*
 * Compute weighted residuals between observed range-Doppler data and the
 * model, over all RD images in RDs, and (when deriv != 0) assemble the
 * Jacobian blocks.
 *
 * FT layout: real residuals of all images first (ntpoints entries), then the
 * imaginary residuals (another ntpoints entries).  FTdv is 2*ntpoints x
 * (3*dn+3): x/y/z vertex derivative blocks followed by the 3 angle columns.
 * FTdoff, FTdsc, FTdxp hold offset, scale and exponent derivative columns.
 *
 * D (optional, may be NULL): dm x dn matrix mapping reduced shape parameters
 * to vertices; when present, vertex derivatives are projected through it
 * (kept here, rather than in the caller, to minimize memory usage).
 *
 * NOTE(review): `scale` is dereferenced per image without a NULL check, so
 * callers must always pass a valid array -- confirm against call sites.
 */
void Calculate_RDs(int *tlist,double *vlist,int nfac,int nvert,double *angles,RDstruct *RDs,double *offset,double *D,int dm,int dn,double *Weight,double *scale,double rexp,double *FT,double *FTdv,double *FTdoff,double *FTdsc,double *FTdxp,int deriv)
{
    int DisNULL = 0;
    int UseWeight = 0;
    int nRD = RDs->nRD; /* number of RD images */

    /* Sanity checks: with a parameter matrix D, its row count must match the
     * vertex count. */
    if (D == NULL)
        DisNULL = 1;
    if (!DisNULL && nvert != dm) {
        puts("Error: nvert is not equal dm.");
        exit(1);
    }
    if (Weight != NULL)
        UseWeight = 1;

    int *nopoints, *cumpoints, ntpoints;
    nopoints = RDs->nobs; /* samples per RD image */
    /* cumpoints[k] = total samples in images 0..k-1, used for indexing into
     * the stacked output arrays. */
    cumpoints = malloc((nRD + 1) * sizeof(int));
    if (cumpoints == NULL) { /* BUG FIX: allocation was unchecked */
        puts("Error: out of memory in Calculate_RDs.");
        exit(1);
    }
    cumpoints[0] = 0;
    for (int i = 1; i <= nRD; i++)
        cumpoints[i] = cumpoints[i - 1] + nopoints[i - 1];
    ntpoints = cumpoints[nRD]; /* total number of samples */

    if (deriv == 0) {
        /* Residuals only; each image is independent. */
        omp_set_num_threads(NUM_THREADS);
#pragma omp parallel for
        for (int obsind = 0; obsind < nRD; obsind++) {
            double *FTE, *FTTIME, *FTfreqx, *FTfreqy, *FTrfreq, *datar, *datai;
            double *FTr, *FTi;
            double W;
            if (UseWeight == 1)
                W = Weight[obsind];
            else
                W = 1;
            FTr = calloc(nopoints[obsind], sizeof(double));
            FTi = calloc(nopoints[obsind], sizeof(double));
            FTE = RDs->E + 3 * obsind;
            FTTIME = RDs->TIME + obsind;
            FTfreqx = RDs->freqx[obsind];
            FTfreqy = RDs->freqy[obsind];
            FTrfreq = RDs->rfreq + obsind;
            datar = RDs->datar[obsind];
            datai = RDs->datai[obsind];
            Calculate_Range_Doppler(tlist, vlist, nfac, nvert, angles, FTE, *FTTIME, FTfreqx, FTfreqy, nopoints[obsind], *FTrfreq, offset + 2 * obsind, scale[obsind], rexp, FTr, FTi);
            /* weighted residuals: real part, then imaginary part shifted by
             * ntpoints */
            for (int j = 0; j < nopoints[obsind]; j++) {
                FT[j + cumpoints[obsind]] = W * (datar[j] - FTr[j]);
                FT[j + cumpoints[obsind] + ntpoints] = W * (datai[j] - FTi[j]);
            }
            free(FTr);
            free(FTi);
        }
        free(cumpoints); /* BUG FIX: was leaked on this early return */
        return;
    }

    /* Derivative branch: number of free vertex parameters is dn when the
     * projection matrix D is supplied, otherwise nvert. */
    int nvertf;
    if (D != NULL)
        nvertf = dn;
    else {
        nvertf = nvert;
        dn = nvert;
    }
    zero_array(FTdv, 2 * ntpoints * (3 * nvertf + 3));
    zero_array(FTdoff, 2 * ntpoints * 2 * nRD);
    zero_array(FTdsc, 2 * ntpoints * nRD);
    zero_array(FTdxp, 2 * ntpoints);
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel for
    for (int obsind = 0; obsind < nRD; obsind++) {
        int cind = 0;
        int oind = 0;
        double *FTdxr, *FTdxi, *FTdyr, *FTdyi, *FTdzr, *FTdzi, *FTdAr, *FTdAi, *FTdoffr, *FTdoffi, *FTdexpr, *FTdexpi, *FTdxfr, *FTdxfi, *FTdyfr, *FTdyfi, *FTdzfr, *FTdzfi;
        double *FTE, *FTTIME, *FTfreqx, *FTfreqy, *FTrfreq;
        double *FTr, *FTi, *datar, *datai;
        double W;
        if (UseWeight == 1)
            W = Weight[obsind];
        else
            W = 1;
        FTr = calloc(nopoints[obsind], sizeof(double));
        FTi = calloc(nopoints[obsind], sizeof(double));
        FTdxr = calloc(nopoints[obsind] * nvertf, sizeof(double));
        FTdxi = calloc(nopoints[obsind] * nvertf, sizeof(double));
        FTdyr = calloc(nopoints[obsind] * nvertf, sizeof(double));
        FTdyi = calloc(nopoints[obsind] * nvertf, sizeof(double));
        FTdzr = calloc(nopoints[obsind] * nvertf, sizeof(double));
        FTdzi = calloc(nopoints[obsind] * nvertf, sizeof(double));
        FTdAr = calloc(nopoints[obsind] * 3, sizeof(double));
        FTdAi = calloc(nopoints[obsind] * 3, sizeof(double));
        FTdoffr = calloc(nopoints[obsind] * 2, sizeof(double));
        FTdoffi = calloc(nopoints[obsind] * 2, sizeof(double));
        FTdexpr = calloc(nopoints[obsind], sizeof(double));
        FTdexpi = calloc(nopoints[obsind], sizeof(double));
        FTE = RDs->E + 3 * obsind;
        FTTIME = RDs->TIME + obsind;
        FTfreqx = RDs->freqx[obsind];
        FTfreqy = RDs->freqy[obsind];
        FTrfreq = RDs->rfreq + obsind;
        datar = RDs->datar[obsind];
        datai = RDs->datai[obsind];
        if (D != NULL) {
            /* Derivatives wrt full vertex set, then projected through D to
             * the reduced parameterization; the full-size buffers are freed
             * as soon as each projection is done to limit peak memory. */
            FTdxfr = calloc(nopoints[obsind] * nvert, sizeof(double));
            FTdyfr = calloc(nopoints[obsind] * nvert, sizeof(double));
            FTdzfr = calloc(nopoints[obsind] * nvert, sizeof(double));
            FTdxfi = calloc(nopoints[obsind] * nvert, sizeof(double));
            FTdyfi = calloc(nopoints[obsind] * nvert, sizeof(double));
            FTdzfi = calloc(nopoints[obsind] * nvert, sizeof(double));
            Calculate_Range_Doppler_deriv(tlist, vlist, nfac, nvert, angles, FTE, *FTTIME, FTfreqx, FTfreqy, nopoints[obsind], *FTrfreq, offset + 2 * obsind, scale[obsind], rexp, FTr, FTi, FTdxfr, FTdxfi, FTdyfr, FTdyfi, FTdzfr, FTdzfi, FTdAr, FTdAi, FTdoffr, FTdoffi, FTdexpr, FTdexpi);
            matrix_prod(FTdxfr, nopoints[obsind], nvert, D, nvertf, FTdxr);
            matrix_prod(FTdxfi, nopoints[obsind], nvert, D, nvertf, FTdxi);
            free(FTdxfr);
            free(FTdxfi);
            matrix_prod(FTdyfr, nopoints[obsind], nvert, D, nvertf, FTdyr);
            matrix_prod(FTdyfi, nopoints[obsind], nvert, D, nvertf, FTdyi);
            free(FTdyfr);
            free(FTdyfi);
            matrix_prod(FTdzfr, nopoints[obsind], nvert, D, nvertf, FTdzr);
            matrix_prod(FTdzfi, nopoints[obsind], nvert, D, nvertf, FTdzi);
            free(FTdzfr);
            free(FTdzfi);
        }
        else
            Calculate_Range_Doppler_deriv(tlist, vlist, nfac, nvert, angles, FTE, *FTTIME, FTfreqx, FTfreqy, nopoints[obsind], *FTrfreq, offset + 2 * obsind, scale[obsind], rexp, FTr, FTi, FTdxr, FTdxi, FTdyr, FTdyi, FTdzr, FTdzi, FTdAr, FTdAi, FTdoffr, FTdoffi, FTdexpr, FTdexpi);
        /* weighted residuals, real then imaginary */
        for (int j = 0; j < nopoints[obsind]; j++) {
            FT[j + cumpoints[obsind]] = W * (datar[j] - FTr[j]);
            FT[j + cumpoints[obsind] + ntpoints] = W * (datai[j] - FTi[j]);
        }
        cind = cumpoints[obsind]; /* row offset of this image's samples */
        oind = nopoints[obsind];  /* number of rows contributed */
        if (UseWeight == 1) {
            /* scale all Jacobian blocks (and model values) by the weight */
            mult_with_cons(FTdxr, oind, dn, W);
            mult_with_cons(FTdxi, oind, dn, W);
            mult_with_cons(FTdyr, oind, dn, W);
            mult_with_cons(FTdyi, oind, dn, W);
            mult_with_cons(FTdzr, oind, dn, W);
            mult_with_cons(FTdzi, oind, dn, W);
            mult_with_cons(FTdAr, oind, 3, W);
            mult_with_cons(FTdAi, oind, 3, W);
            mult_with_cons(FTdoffr, oind, 2, W);
            mult_with_cons(FTdoffi, oind, 2, W);
            mult_with_cons(FTr, oind, 1, W);
            mult_with_cons(FTi, oind, 1, W);
            mult_with_cons(FTdexpr, oind, 1, W);
            mult_with_cons(FTdexpi, oind, 1, W);
        }
        /* scatter the per-image blocks into the stacked Jacobians; real rows
         * at cind, imaginary rows at cind + ntpoints */
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdxr, oind, dn, cind, 0);
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdxi, oind, dn, cind + ntpoints, 0);
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdyr, oind, dn, cind, dn);
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdyi, oind, dn, cind + ntpoints, dn);
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdzr, oind, dn, cind, 2 * dn);
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdzi, oind, dn, cind + ntpoints, 2 * dn);
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdAr, oind, 3, cind, 3 * dn);
        set_submatrix(FTdv, 2 * ntpoints, 3 * dn + 3, FTdAi, oind, 3, cind + ntpoints, 3 * dn);
        set_submatrix(FTdoff, 2 * ntpoints, 2 * nRD, FTdoffr, oind, 2, cind, 2 * obsind);
        set_submatrix(FTdoff, 2 * ntpoints, 2 * nRD, FTdoffi, oind, 2, cind + ntpoints, 2 * obsind);
        set_submatrix(FTdsc, 2 * ntpoints, nRD, FTr, oind, 1, cind, obsind);
        set_submatrix(FTdsc, 2 * ntpoints, nRD, FTi, oind, 1, cind + ntpoints, obsind);
        set_submatrix(FTdxp, 2 * ntpoints, 1, FTdexpr, oind, 1, cind, 0);
        set_submatrix(FTdxp, 2 * ntpoints, 1, FTdexpi, oind, 1, cind + ntpoints, 0);
        free(FTr);
        free(FTi);
        free(FTdxr);
        free(FTdxi);
        free(FTdyr);
        free(FTdyi);
        free(FTdzr);
        free(FTdzi);
        free(FTdAr);
        free(FTdAi);
        free(FTdoffr);
        free(FTdoffi);
        free(FTdexpr);
        free(FTdexpi);
    }
    free(cumpoints); /* BUG FIX: was leaked at function exit */
}
BEIntegratorLaplace.h
/*! * @file BEIntegratorLaplace.h * @author Michal Merta * @author Jan Zapletal * @date July 10, 2013 * @brief Header file for class BEIntegratorLaplace * */ #ifndef BEINTEGRATORLAPLACE_H #define BEINTEGRATORLAPLACE_H #include "BEIntegrator.h" //#include "BESpace.h" namespace bem4i { /*! * concrete class for Laplace integrators over triangular elements * * the class uses Curiously recurring template pattern to replace virtual methods */ template<class LO, class SC> class BEIntegratorLaplace : public BEIntegrator<LO, SC, BEIntegratorLaplace<LO, SC> > { // to get inner type of complex numbers (for Helmholtz) typedef typename GetType<LO, SC>::SCVT SCVT; // we have to enable BEIntegrator to use kernel evaluation private methods friend class BEIntegrator<LO, SC, BEIntegratorLaplace<LO, SC> >; public: //! default constructor BEIntegratorLaplace( ); //! copy constructor BEIntegratorLaplace( const BEIntegratorLaplace& orig ); //! constructor taking BESpace as the argument BEIntegratorLaplace( BESpace<LO, SC>* space, int* quadratureOrder, quadratureType quadrature = SauterSchwab, int* quadratureOrderDisjointElems = nullptr ); //! destructor virtual ~BEIntegratorLaplace( ); //! returns element matrix of single layer potential void computeElemMatrix1Layer( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of double layer potential void computeElemMatrix2Layer( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of hypersingular operator void computeElemMatrixHypersingular( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of hypersingular operator void computeElemMatrixHypersingular( LO outerElem, LO innerElem, FullMatrix<LO, SC>& V, FullMatrix<LO, SC>& matrix ) const; /*! 
* evaluates the representation formula in points x, stores values in * preallocated vector values * * @param[in] x pointer to array with evaluation points * @param[in] n number of evaluation points * @param[in] dir Dirichlet data * @param[in] neu Neumann data * @param[in] interior flag interior/exterior * @param[out] values preallocated vector for storing results */ void representationFormula( const SC * x, LO nPoints, const Vector<LO, SC> & dir, const Vector<LO, SC> & neu, bool interior, Vector<LO, SC> & values ) const { if ( this->space->getTestFunctionType( ) == p1 && this->space->getAnsatzFunctionType( ) == p1 ) { this->representationFormulaP1P1( x, nPoints, dir, neu, interior, values ); } else if ( this->space->getTestFunctionType( ) == p0 && this->space->getAnsatzFunctionType( ) == p0 ) { std::cout << "Not implemented!" << std::endl; //this->representationFormulaP0P0( x, nPoints, dir, neu, interior, values ); } else { this->representationFormulaP1P0( x, nPoints, dir, neu, interior, values ); } } /*! 
* evaluates the representation formula in points x * for p1 Dirchlet, p0 Neumann data, * stores values in preallocated vector values * * @param[in] x pointer to array with evaluation points * @param[in] n number of evaluation points * @param[in] dir Dirichlet data * @param[in] neu Neumann data * @param[in] interior flag interior/exterior * @param[out] values preallocated vector for storing results */ void representationFormulaP1P0( const SCVT * x, LO n, const Vector<LO, SC> & dir, const Vector<LO, SC> & neu, bool interior, Vector<LO, SC> & values ) const; void representationFormulaP1P1( const SCVT * x, LO n, const Vector<LO, SC> & dir, const Vector<LO, SC> & neu, bool interior, Vector<LO, SC> & values ) const; /* * evaluates double layer potential in points x, stores values in * preallocated vector values * * @param[in] x pointer to array with evaluation points * @param[in] nPoints number of evaluation points * @param[in] density density function * @param[out] values preallocated vector for storing results */ void doubleLayerPotential( const SCVT * x, LO nPoints, const Vector<LO, SC> & density, Vector<LO, SC> & values ) const { doubleLayerPotentialP1( x, nPoints, density, values ); } /* * evaluates double layer potential in points x * for p1 density, * stores values in preallocated vector values * * @param[in] x pointer to array with evaluation points * @param[in] nPoints number of evaluation points * @param[in] density density function * @param[out] values preallocated vector for storing results */ void doubleLayerPotentialP1( const SCVT * x, LO nPoints, const Vector<LO, SC> & density, Vector<LO, SC> & values ) const; //#if N_MIC > 0 //! 
computes element matrix on MIC #if N_MIC > 0 __attribute__ ( ( target( mic ) ) ) #endif static void computeElemMatrix1LayerP0P0MIC( const SCVT * nodes, const LO * elems, const SCVT * areas, LO outerElem, LO innerElem, int qOrdOuter, int qOrdInner, const SCVT * outerX1ref, const SCVT * outerX2ref, const SCVT * innerX1ref, const SCVT * innerX2ref, SCVT * outerX1, SCVT * outerX2, SCVT * outerX3, SCVT * innerX1, SCVT * innerX2, SCVT * innerX3, const SCVT * vOutW, const SCVT * vInW, SC * elemMatrix ); #if N_MIC > 0 __attribute__ ( ( target( mic ) ) ) #endif static void computeElemMatrix2LayerP0P1MIC( const SCVT * nodes, const LO * elems, const SCVT * areas, const SCVT * normals, LO outerElem, LO innerElem, int qOrderOuter, int qOrderInner, const SCVT * outerX1ref, const SCVT * outerX2ref, const SCVT * innerX1ref, const SCVT * innerX2ref, SCVT * outerX1, SCVT * outerX2, SCVT * outerX3, SCVT * innerX1, SCVT * innerX2, SCVT * innerX3, const SCVT * phi1Values, const SCVT * phi2Values, const SCVT * phi3Values, const SCVT * vOutW, const SCVT * vInW, SC * elemMatrix ); #if N_MIC > 0 __attribute__ ( ( target( mic ) ) ) #endif static void computeElemMatrix1And2LayerP0P0P0P1MIC( const SCVT * nodes, const LO * elems, const SCVT * areas, const SCVT * normals, LO outerElem, LO innerElem, int qOrderOuter, int qOrderInner, const SCVT * outerX1ref, const SCVT * outerX2ref, const SCVT * innerX1ref, const SCVT * innerX2ref, SCVT * outerX1, SCVT * outerX2, SCVT * outerX3, SCVT * innerX1, SCVT * innerX2, SCVT * innerX3, const SCVT * phi1Values, const SCVT * phi2Values, const SCVT * phi3Values, const SCVT * vOutW, const SCVT * vInW, SC * elemV, SC * elemK ); //#endif private: //! returns element matrix of single layer potential with p0p0 approximation void computeElemMatrix1LayerP0P0( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! 
returns element matrix of single layer potential with p0p0 approximation void computeElemMatrix1LayerP1P1( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of double layer potential with p0p1 approximation void computeElemMatrix2LayerP0P1( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns element matrix of double layer potential with p0p0 approximation void computeElemMatrix2LayerP0P0( LO outerElem, LO innerElem, FullMatrix<LO, SC> & matrix ) const; //! returns element matrix of double layer potential with p0p0 approximation void computeElemMatrix2LayerP1P1( LO outerElem, LO innerElem, FullMatrix<LO, SC> & matrix ) const; //! returns element matrix of hypersingular operator with p1p1 approximation void computeElemMatrixHypersingularP1P1( LO outerElem, LO innerElem, FullMatrix<LO, SC>& matrix ) const; //! returns specific kernel evaluated in given points (x, y) SC evalSingleLayerKernel( const SCVT *x, const SCVT *y ) const { SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); return ( PI_FACT / norm ); }; #pragma omp declare simd simdlen( DATA_WIDTH ) SC evalSingleLayerKernel( SCVT x1, SCVT x2, SCVT x3, SCVT y1, SCVT y2, SCVT y3 ) const { SCVT norm = std::sqrt( ( x1 - y1 ) * ( x1 - y1 ) + ( x2 - y2 ) * ( x2 - y2 ) + ( x3 - y3 ) * ( x3 - y3 ) ); return ( PI_FACT / norm ); }; //! 
returns specific kernel evaluated in given points (x, y) #pragma omp declare simd uniform( n1, n2, n3 ) simdlen( DATA_WIDTH ) SC evalDoubleLayerKernel( SCVT x1, SCVT x2, SCVT x3, SCVT y1, SCVT y2, SCVT y3, SCVT n1, SCVT n2, SCVT n3 ) const { SCVT diff1 = x1 - y1; SCVT diff2 = x2 - y2; SCVT diff3 = x3 - y3; SCVT norm = std::sqrt( diff1 * diff1 + diff2 * diff2 + diff3 * diff3 ); SCVT dot = diff1 * n1 + diff2 * n2 + diff3 * n3; return ( PI_FACT * dot / ( norm * norm * norm ) ); }; SC evalDoubleLayerKernel( const SCVT *x, const SCVT *y, const SCVT* n ) const { SCVT norm = std::sqrt( ( x[0] - y[0] )*( x[0] - y[0] ) + ( x[1] - y[1] )*( x[1] - y[1] ) + ( x[2] - y[2] )*( x[2] - y[2] ) ); SCVT dot = ( x[0] - y[0] ) * n[0] + ( x[1] - y[1] ) * n[1] + ( x[2] - y[2] ) * n[2]; return ( PI_FACT * dot / ( norm * norm * norm ) ); }; #pragma omp declare simd uniform( stau, alpha1, alpha2 ) simdlen( DATA_WIDTH ) SC collocation1LayerP0( SC sx, SC tx, SC ux, SC stau, SC alpha1, SC alpha2 ) const; //! evaluates p0 Laplace single layer operator in xLocal #pragma omp declare simd uniform( stau, alpha1, alpha2 ) simdlen( DATA_WIDTH ) SC collocation1LayerP1( SC sx, SC tx, SC ux, SC stau, SC alpha1, SC alpha2 ) const; #pragma omp declare simd uniform( s, alpha ) simdlen( DATA_WIDTH ) SC f1LayerP0( SC sx, SC tx, SC ux, SC s, SC alpha ) const; //! help function for collocation of single layer operator #pragma omp declare simd uniform( stau, alpha ) simdlen( DATA_WIDTH ) SC f1LayerP1( SC stau, SC alpha, SC tx, SC sx, SC ux ) const; #pragma omp declare simd uniform( stau, alpha1, alpha2 ) simdlen( DATA_WIDTH ) SC collocation2LayerP1( SC sx, SC tx, SC ux, SC stau, SC alpha1, SC alpha2 ) const; #pragma omp declare simd simdlen( DATA_WIDTH ) void collocation2LayerP1All( int rot, SC sx, SC tx, SC ux, SC stau, SC alpha1, SC alpha2, SC & ret1, SC & ret2, SC & ret3 ) const; //! 
help function for collocation of double layer operator #pragma omp declare simd uniform( alpha, stau ) simdlen( DATA_WIDTH ) SC f2LayerP1( SC alpha, SC tx, SC sx, SC ux, SC stau ) const; #pragma omp declare simd simdlen( DATA_WIDTH ) void f2LayerP1All( SC alpha, SC tx, SC sx, SC ux, SC stau, SC alpha1, SC alpha2, SC & ret1, SC & ret2, SC & ret3 ) const; //! evaluates p0 Laplace double layer operator in xLocal #pragma omp declare simd uniform( stau, alpha1, alpha2 ) simdlen( DATA_WIDTH ) SC collocation2LayerP0( SC sx, SC tx, SC ux, SC stau, SC alpha1, SC alpha2 ) const; //! help function for collocation of double layer operator #pragma omp declare simd uniform( alpha, stau ) simdlen( DATA_WIDTH ) SC f2LayerP0( SC alpha, SC tx, SC sx, SC ux, SC stau ) const; }; } // include .cpp file to overcome linking problems due to templates #include "BEIntegratorLaplace.cpp" #endif /* BEINTEGRATORLAPLACE_H */
wbb3_fmt_plug.c
/* WoltLab Burning Board 3 (WBB3) cracker patch for JtR. Hacked together during * May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * Input Format => user:$wbb3$*type*hash * * Where, * * type => 1, for sha1($salt.sha1($salt.sha1($pass))) hashing scheme * * JimF, July 2012. * Made small change in hex_encode 10x improvement in speed. Also some other * changes. Should be a thin dyanamic. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_wbb3; #elif FMT_REGISTERS_H john_register_one(&fmt_wbb3); #else #include "arch.h" #include "sha.h" #include <string.h> #include <assert.h> #include <errno.h> #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // tuned on core i7 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "wbb3" #define FORMAT_NAME "WoltLab BB3" #define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 20 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 static struct fmt_tests wbb3_tests[] = { {"$wbb3$*1*0b053db07dc02bc6f6e24e00462f17e3c550afa9*e2063f7c629d852302d3020599376016ff340399", "123456"}, {"$wbb3$*1*0b053db07dc02bc6f6e24e00462f17e3c550afa9*f6975cc560c5d03feb702158d08f90bf2fa773d6", "password"}, {"$wbb3$*1*a710463f75bf4568d398db32a53f9803007388a3*2c56d23b44eb122bb176dfa2a1452afaf89f1143", "123456"}, {"$wbb3$*1*1039145e9e785ddb2ac7ccca89ac1b159b595cc1*2596b5f8e7cdaf4b15604ad336b810e8e2935b1d", "12345678"}, 
{"$wbb3$*1*db763342e23f8ccdbd9c90d1cc7896d80b7e0a44*26496a87c1a7dd68f7beceb2fc40b6fc4223a453", "12345678"}, {"$wbb3$*1*bf2c7d0c8fb6cb146adf8933e32da012d31b5bbb*d945c02cf85738b7db4f4f05edd676283280a513", "123456789"}, {"$wbb3$*1*d132b22d3f1d942b99cc1f5fbd5cc3eb0824d608*e3e03fe02223c5030e834f81997f614b43441853", "1234567890"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static unsigned char (*hexhash1)[40]; static int dirty; static struct custom_salt { int type; unsigned char salt[41]; } *cur_salt; static inline void hex_encode(unsigned char *str, int len, unsigned char *out) { int i; for (i = 0; i < len; ++i) { out[0] = itoa16[str[i]>>4]; out[1] = itoa16[str[i]&0xF]; out += 2; } } static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); hexhash1 = mem_calloc(self->params.max_keys_per_crypt, sizeof(*hexhash1)); } static void done(void) { MEM_FREE(hexhash1); MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char _ctcopy[256], *ctcopy = _ctcopy; char *p; int res; if (strncmp(ciphertext, "$wbb3$*", 7)) return 0; strnzcpy(ctcopy, ciphertext, 255); ctcopy += 7; p = strtokm(ctcopy, "*"); /* type */ if(!p) goto err; if(!isdec(p)) goto err; res = atoi(p); if (res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; res = strlen(p); if (hexlenl(p) != res) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* hash */ goto err; if (hexlenl(p) != BINARY_SIZE * 2) goto err; return 1; err: return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char _ctcopy[256], *ctcopy = _ctcopy; char 
*p; memset(&cs, 0, sizeof(cs)); strnzcpy(ctcopy, ciphertext, 255); ctcopy += 7; /* skip over "$wbb3$*" */ p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); strcpy((char *)cs.salt, p); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char hexhash[40]; SHA_CTX ctx; if (dirty) { unsigned char out[20]; SHA1_Init(&ctx); SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA1_Final(out, &ctx); hex_encode(out, 20, hexhash1[index]); } SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->salt, 40); SHA1_Update(&ctx, hexhash1[index], 40); SHA1_Final((unsigned char*)crypt_out[index], &ctx); hex_encode((unsigned char*)crypt_out[index], 20, hexhash); SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->salt, 40); SHA1_Update(&ctx, hexhash, 40); SHA1_Final((unsigned char*)crypt_out[index], &ctx); } dirty = 0; return count; } static int cmp_all(void *binary, int count) { int index 
= 0; for (; index < count; index++) if (*((ARCH_WORD_32*)binary) == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return *((ARCH_WORD_32*)binary) == crypt_out[index][0]; } static int cmp_exact(char *source, int index) { void *binary = get_binary(source); return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static void wbb3_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; dirty = 1; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_wbb3 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, wbb3_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, wbb3_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
omptarget.h
//===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains the declarations of all library macros, types, // and functions. // //===----------------------------------------------------------------------===// #ifndef OMPTARGET_H #define OMPTARGET_H #include "common/allocator.h" #include "common/debug.h" // debug #include "common/state-queue.h" #include "common/support.h" #include "interface.h" // interfaces with omp, compiler, and user #include "target_impl.h" #define OMPTARGET_NVPTX_VERSION 1.1 // used by the library for the interface with the app #define DISPATCH_FINISHED 0 #define DISPATCH_NOTFINISHED 1 // used by dynamic scheduling #define FINISHED 0 #define NOT_FINISHED 1 #define LAST_CHUNK 2 #define BARRIER_COUNTER 0 #define ORDERED_COUNTER 1 // arguments needed for L0 parallelism only. class omptarget_nvptx_SharedArgs { public: // All these methods must be called by the master thread only. INLINE void Init() { args = buffer; nArgs = MAX_SHARED_ARGS; } INLINE void DeInit() { // Free any memory allocated for outlined parallel function with a large // number of arguments. if (nArgs > MAX_SHARED_ARGS) { SafeFree(args, "new extended args"); Init(); } } INLINE void EnsureSize(size_t size) { if (size > nArgs) { if (nArgs > MAX_SHARED_ARGS) { SafeFree(args, "new extended args"); } args = (void **)SafeMalloc(size * sizeof(void *), "new extended args"); nArgs = size; } } // Called by all threads. INLINE void **GetArgs() const { return args; }; private: // buffer of pre-allocated arguments. void *buffer[MAX_SHARED_ARGS]; // pointer to arguments buffer. // starts off as a pointer to 'buffer' but can be dynamically allocated. 
void **args; // starts off as MAX_SHARED_ARGS but can increase in size. uint32_t nArgs; }; extern omptarget_nvptx_SharedArgs EXTERN_SHARED(omptarget_nvptx_globalArgs); // Worker slot type which is initialized with the default worker slot // size of 4*32 bytes. struct __kmpc_data_sharing_slot { __kmpc_data_sharing_slot *Next; __kmpc_data_sharing_slot *Prev; void *PrevSlotStackPtr; void *DataEnd; char Data[DS_Worker_Warp_Slot_Size]; }; // Data structure to keep in shared memory that traces the current slot, stack, // and frame pointer as well as the active threads that didn't exit the current // environment. struct DataSharingStateTy { __kmpc_data_sharing_slot *SlotPtr[DS_Max_Warp_Number]; void *StackPtr[DS_Max_Warp_Number]; void *volatile FramePtr[DS_Max_Warp_Number]; __kmpc_impl_lanemask_t ActiveThreads[DS_Max_Warp_Number]; }; extern DataSharingStateTy EXTERN_SHARED(DataSharingState); //////////////////////////////////////////////////////////////////////////////// // task ICV and (implicit & explicit) task state class omptarget_nvptx_TaskDescr { public: // methods for flags INLINE omp_sched_t GetRuntimeSched() const; INLINE void SetRuntimeSched(omp_sched_t sched); INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; } INLINE int InL2OrHigherParallelRegion() const { return items.flags & TaskDescr_InParL2P; } INLINE int IsParallelConstruct() const { return items.flags & TaskDescr_IsParConstr; } INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); } // methods for other fields INLINE uint16_t &ThreadId() { return items.threadId; } INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; } INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; } INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) { prev = taskDescr; } // init & copy INLINE void InitLevelZeroTaskDescr(); INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void 
Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr); INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr); INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr); INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr); INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr, uint16_t tid, uint16_t tnum); INLINE void SaveLoopData(); INLINE void RestoreLoopData() const; private: // bits for flags: (6 used, 2 free) // 3 bits (SchedMask) for runtime schedule // 1 bit (InPar) if this thread has encountered one or more parallel region // 1 bit (IsParConstr) if ICV for a parallel region (false = explicit task) // 1 bit (InParL2+) if this thread has encountered L2 or higher parallel // region static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4); static const uint8_t TaskDescr_InPar = 0x10; static const uint8_t TaskDescr_IsParConstr = 0x20; static const uint8_t TaskDescr_InParL2P = 0x40; struct SavedLoopDescr_items { int64_t loopUpperBound; int64_t nextLowerBound; int64_t chunk; int64_t stride; kmp_sched_t schedule; } loopData; struct TaskDescr_items { uint8_t flags; // 6 bit used (see flag above) uint8_t unused; uint16_t threadId; // thread id uint64_t runtimeChunkSize; // runtime chunk size } items; omptarget_nvptx_TaskDescr *prev; }; // build on kmp typedef struct omptarget_nvptx_ExplicitTaskDescr { omptarget_nvptx_TaskDescr taskDescr; // omptarget_nvptx task description (must be first) kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last) } omptarget_nvptx_ExplicitTaskDescr; //////////////////////////////////////////////////////////////////////////////// // Descriptor of a parallel region (worksharing in general) class omptarget_nvptx_WorkDescr { public: // access to data INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; 
} private: omptarget_nvptx_TaskDescr masterTaskICV; }; //////////////////////////////////////////////////////////////////////////////// class omptarget_nvptx_TeamDescr { public: // access to data INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() { return &levelZeroTaskDescr; } INLINE omptarget_nvptx_WorkDescr &WorkDescr() { return workDescrForActiveParallel; } // init INLINE void InitTeamDescr(); INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) { worker_rootS[wid].DataEnd = &worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size; // We currently do not have a next slot. worker_rootS[wid].Next = 0; worker_rootS[wid].Prev = 0; worker_rootS[wid].PrevSlotStackPtr = 0; return (__kmpc_data_sharing_slot *)&worker_rootS[wid]; } private: omptarget_nvptx_TaskDescr levelZeroTaskDescr; // icv for team master initial thread omptarget_nvptx_WorkDescr workDescrForActiveParallel; // one, ONLY for the active par ALIGN(16) __kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number]; }; //////////////////////////////////////////////////////////////////////////////// // thread private data (struct of arrays for better coalescing) // tid refers here to the global thread id // do not support multiple concurrent kernel a this time class omptarget_nvptx_ThreadPrivateContext { public: // task INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) { return &levelOneTaskDescr[tid]; } INLINE void SetTopLevelTaskDescr(int tid, omptarget_nvptx_TaskDescr *taskICV) { topTaskDescr[tid] = taskICV; } INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const; // parallel INLINE uint16_t &NumThreadsForNextParallel(int tid) { return nextRegion.tnum[tid]; } // schedule (for dispatch) INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; } INLINE int64_t &Chunk(int tid) { return chunk[tid]; } INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; } INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; } INLINE int64_t 
&Stride(int tid) { return stride[tid]; } INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; } INLINE void InitThreadPrivateContext(int tid); INLINE uint64_t &Cnt() { return cnt; } private: // team context for this team omptarget_nvptx_TeamDescr teamContext; // task ICV for implicit threads in the only parallel region omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM]; // pointer where to find the current task ICV (top of the stack) omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM]; union { // Only one of the two is live at the same time. // parallel uint16_t tnum[MAX_THREADS_PER_TEAM]; } nextRegion; // schedule (for dispatch) kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for int64_t chunk[MAX_THREADS_PER_TEAM]; int64_t loopUpperBound[MAX_THREADS_PER_TEAM]; // state for dispatch with dyn/guided OR static (never use both at a time) int64_t nextLowerBound[MAX_THREADS_PER_TEAM]; int64_t stride[MAX_THREADS_PER_TEAM]; uint64_t cnt; }; /// Memory manager for statically allocated memory. 
class omptarget_nvptx_SimpleMemoryManager {
private:
  // Per-SM table of ownership keys, one slot per possible OpenMP state.
  struct MemDataTy {
    volatile unsigned keys[OMP_STATE_COUNT];
  } MemData[MAX_SM] ALIGN(128);

  // Maps a key to a slot index. The mask is exact only when OMP_STATE_COUNT
  // is a power of two (NOTE(review): assumed here — confirm in the config
  // headers that define OMP_STATE_COUNT).
  INLINE static uint32_t hash(unsigned key) {
    return key & (OMP_STATE_COUNT - 1);
  }

public:
  INLINE void Release();
  INLINE const void *Acquire(const void *buf, size_t size);
};

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////

extern omptarget_nvptx_SimpleMemoryManager omptarget_nvptx_simpleMemoryManager;
extern uint32_t EXTERN_SHARED(usedMemIdx);
extern uint32_t EXTERN_SHARED(usedSlotIdx);
#if _OPENMP
// One entry per warp; placed in team-shared memory via the OpenMP allocator
// when compiled as OpenMP code, otherwise via EXTERN_SHARED below.
extern uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
#pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc)
#else
extern uint8_t EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE];
#endif
extern uint16_t EXTERN_SHARED(threadLimit);
extern uint16_t EXTERN_SHARED(threadsInTeam);
extern uint16_t EXTERN_SHARED(nThreads);
extern omptarget_nvptx_ThreadPrivateContext *
    EXTERN_SHARED(omptarget_nvptx_threadPrivateContext);
extern uint32_t EXTERN_SHARED(execution_param);
extern void *EXTERN_SHARED(ReductionScratchpadPtr);

////////////////////////////////////////////////////////////////////////////////
// work function (outlined parallel/simd functions) and arguments.
// needed for L1 parallelism only.
//////////////////////////////////////////////////////////////////////////////// typedef void *omptarget_nvptx_WorkFn; extern volatile omptarget_nvptx_WorkFn EXTERN_SHARED(omptarget_nvptx_workFn); //////////////////////////////////////////////////////////////////////////////// // get private data structures //////////////////////////////////////////////////////////////////////////////// INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor(); INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor(); INLINE omptarget_nvptx_TaskDescr * getMyTopTaskDescriptor(bool isSPMDExecutionMode); INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId); //////////////////////////////////////////////////////////////////////////////// // inlined implementation //////////////////////////////////////////////////////////////////////////////// INLINE uint32_t __kmpc_impl_ffs(uint32_t x) { return __builtin_ffs(x); } INLINE uint32_t __kmpc_impl_popc(uint32_t x) { return __builtin_popcount(x); } INLINE uint32_t __kmpc_impl_ffs(uint64_t x) { return __builtin_ffsl(x); } INLINE uint32_t __kmpc_impl_popc(uint64_t x) { return __builtin_popcountl(x); } #include "common/omptargeti.h" #endif
oskar_cross_correlate_gaussian_scalar_omp.c
/* * Copyright (c) 2014-2015, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <math.h> #include "correlate/private_correlate_functions_inline.h" #include "correlate/oskar_cross_correlate_gaussian_scalar_omp.h" #ifdef __cplusplus extern "C" { #endif /* Single precision. 
*/
/* Cross-correlates Gaussian-shaped sources for all station pairs (p > q),
 * single precision. For each unfiltered baseline the per-source response
 * (bandwidth-smearing sinc times Gaussian width term) is accumulated and
 * added into vis[] at the flat baseline index. */
void oskar_cross_correlate_gaussian_scalar_omp_f(int num_sources,
        int num_stations, const float2* jones, const float* source_I,
        const float* source_l, const float* source_m, const float* source_n,
        const float* source_a, const float* source_b, const float* source_c,
        const float* station_u, const float* station_v,
        const float* station_w, float uv_min_lambda, float uv_max_lambda,
        float inv_wavelength, float frac_bandwidth, float2* vis)
{
    int SQ;

    /* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
    for (SQ = 0; SQ < num_stations; ++SQ)
    {
        int SP, i;
        const float2 *station_p, *station_q;

        /* Pointer to source vector for station q. */
        station_q = &jones[SQ * num_sources];

        /* Loop over baselines for this station. */
        for (SP = SQ + 1; SP < num_stations; ++SP)
        {
            float uv_len, uu, vv, ww, uu2, vv2, uuvv;
            float2 sum, guard;
            sum.x = 0.0f; sum.y = 0.0f;
            /* NOTE(review): 'guard' is passed to the inline accumulator and
             * looks like a compensation term for single-precision summation —
             * confirm in private_correlate_functions_inline.h. */
            guard.x = 0.0f; guard.y = 0.0f;

            /* Pointer to source vector for station p. */
            station_p = &jones[SP * num_sources];

            /* Get common baseline values. */
            oskar_evaluate_baseline_terms_inline_f(station_u[SP],
                    station_u[SQ], station_v[SP], station_v[SQ],
                    station_w[SP], station_w[SQ], inv_wavelength,
                    frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2,
                    &uuvv);

            /* Apply the baseline length filter. */
            if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
                continue;

            /* Loop over sources. */
            for (i = 0; i < num_sources; ++i)
            {
                float l, m, n, r1, r2;

                /* Get source direction cosines. */
                l = source_l[i];
                m = source_m[i];
                n = source_n[i];

                /* Compute bandwidth-smearing term. */
                r1 = oskar_sinc_f(uu * l + vv * m + ww * (n - 1.0f));

                /* Evaluate Gaussian source width term. */
                r2 = expf(-(source_a[i] * uu2 + source_b[i] * uuvv +
                        source_c[i] * vv2));
                r1 *= r2;

                /* Accumulate baseline visibility response for source. */
                oskar_accumulate_baseline_visibility_for_source_scalar_inline_f(
                        &sum, i, source_I, station_p, station_q, r1, &guard);
            }

            /* Add result to the baseline visibility. */
            i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
            vis[i].x += sum.x;
            vis[i].y += sum.y;
        }
    }
}

/* Double precision. */
/* Same algorithm as the _f variant; no guard accumulator is used here. */
void oskar_cross_correlate_gaussian_scalar_omp_d(int num_sources,
        int num_stations, const double2* jones, const double* source_I,
        const double* source_l, const double* source_m, const double* source_n,
        const double* source_a, const double* source_b, const double* source_c,
        const double* station_u, const double* station_v,
        const double* station_w, double uv_min_lambda, double uv_max_lambda,
        double inv_wavelength, double frac_bandwidth, double2* vis)
{
    int SQ;

    /* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
    for (SQ = 0; SQ < num_stations; ++SQ)
    {
        int SP, i;
        const double2 *station_p, *station_q;

        /* Pointer to source vector for station q. */
        station_q = &jones[SQ * num_sources];

        /* Loop over baselines for this station. */
        for (SP = SQ + 1; SP < num_stations; ++SP)
        {
            double uv_len, uu, vv, ww, uu2, vv2, uuvv;
            double2 sum;
            sum.x = 0.0; sum.y = 0.0;

            /* Pointer to source vector for station p. */
            station_p = &jones[SP * num_sources];

            /* Get common baseline values. */
            oskar_evaluate_baseline_terms_inline_d(station_u[SP],
                    station_u[SQ], station_v[SP], station_v[SQ],
                    station_w[SP], station_w[SQ], inv_wavelength,
                    frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2,
                    &uuvv);

            /* Apply the baseline length filter. */
            if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
                continue;

            /* Loop over sources. */
            for (i = 0; i < num_sources; ++i)
            {
                double l, m, n, r1, r2;

                /* Get source direction cosines. */
                l = source_l[i];
                m = source_m[i];
                n = source_n[i];

                /* Compute bandwidth-smearing term. */
                r1 = oskar_sinc_d(uu * l + vv * m + ww * (n - 1.0));

                /* Evaluate Gaussian source width term. */
                r2 = exp(-(source_a[i] * uu2 + source_b[i] * uuvv +
                        source_c[i] * vv2));
                r1 *= r2;

                /* Accumulate baseline visibility response for source. */
                oskar_accumulate_baseline_visibility_for_source_scalar_inline_d(
                        &sum, i, source_I, station_p, station_q, r1);
            }

            /* Add result to the baseline visibility. */
            i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
            vis[i].x += sum.x;
            vis[i].y += sum.y;
        }
    }
}

#ifdef __cplusplus
}
#endif
tinyexr.h
/* Copyright (c) 2014 - 2018, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

// Error codes returned by the TinyEXR API (0 = success, negative = failure).
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
// NOTE(review): TINYEXR_ERROR_INVALID_PARAMETER shares the value -5 with
// TINYEXR_ERROR_INVALID_FILE, so callers cannot distinguish the two codes.
// Changing the value would break existing callers comparing against -5, so
// this is flagged rather than fixed.
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

// Compression types.
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

// ZFP compression modes (TinyEXR extension).
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

// Tile level modes.
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

// Tile rounding modes.
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

// EXR version and feature flags.
typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

// A named header attribute with raw (unparsed) value bytes.
typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];

  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

// Information for a single image channel.
typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long

  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

// A single tile of a tiled EXR image.
typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

// Parsed EXR (part) header.
typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;

// Headers for every part of a multi-part EXR file.
typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

// Decoded pixel data of a single EXR part.
typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

// Images for every part of a multi-part EXR file.
typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

// Deep image data (image[channels][scanlines][samples]).
typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Free's internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Free's internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Free's error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if succes. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
                       const char **err);

// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
//                        const char **err);

// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
//                                 const char **err);

// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                             const unsigned char *memory, size_t size,
                             const char **err);

#ifdef __cplusplus
}
#endif

#endif  // TINYEXR_H_

#ifdef TINYEXR_IMPLEMENTATION
// NOTE(review): "DEIFNED" is a long-standing typo in this guard macro name.
// Both the check and the definition are here, and nothing else references it,
// so it is kept as-is to avoid breaking downstream code that may mirror it.
#ifndef TINYEXR_IMPLEMENTATION_DEIFNED
#define TINYEXR_IMPLEMENTATION_DEIFNED

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <sstream>

#include <limits>
#include <string>
#include <vector>

#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif  // __cplusplus > 199711L

#ifdef _OPENMP
#include <omp.h>
#endif

#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif

#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif

namespace tinyexr {

#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

#if TINYEXR_USE_MINIZ
namespace miniz {

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"

#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif

#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
   reading/writing/appending, PNG writing
   See "unlicense" statement at the end of this file.
   Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
   Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
   http://www.ietf.org/rfc/rfc1951.txt

   Most API's defined in miniz.c are optional.
For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. 
- Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). 
It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. 
Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. 
Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 
   32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/

#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED

//#include <stdlib.h>

// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.

// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO

// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC

#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) ||     \
    defined(__i386) || defined(__i486__) || defined(__i486) ||      \
    defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif

#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
  0  // disable to suppress compiler warnings
#endif

#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) ||   \
    defined(_LP64) || defined(__LP64__) || defined(__ia64__) ||     \
    defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API Definitions.

// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;

// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
  MZ_NO_COMPRESSION = 0,
  MZ_BEST_SPEED = 1,
  MZ_BEST_COMPRESSION = 9,
  MZ_UBER_COMPRESSION = 10,
  MZ_DEFAULT_LEVEL = 6,
  MZ_DEFAULT_COMPRESSION = -1
};

// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15

struct mz_internal_state;

// Compression/decompression stream struct.
typedef struct mz_stream_s {
  const unsigned char *next_in;  // pointer to next byte to read
  unsigned int avail_in;         // number of bytes available at next_in
  mz_ulong total_in;             // total number of bytes consumed so far

  unsigned char *next_out;  // pointer to next byte to write
  unsigned int avail_out;   // number of bytes that can be written to next_out
  mz_ulong total_out;       // total number of bytes produced so far

  char *msg;                        // error msg (unused)
  struct mz_internal_state *state;  // internal state, allocated by zalloc/zfree

  mz_alloc_func
      zalloc;          // optional heap allocation function (defaults to malloc)
  mz_free_func zfree;  // optional heap free function (defaults to free)
  void *opaque;        // heap alloc function user pointer

  int data_type;      // data_type (unused)
  mz_ulong adler;     // adler32 of the source or uncompressed data
  mz_ulong reserved;  // not used
} mz_stream;

typedef mz_stream *mz_streamp;

// Returns the version string of miniz.c.
const char *mz_version(void);

// mz_deflateInit() initializes a compressor with default options:
// Parameters:
//  pStream must point to an initialized mz_stream struct.
//  level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
//  level 1 enables a specially optimized compression function that's been
//  optimized purely for performance, not ratio.
//  (This special func. is currently only enabled when
//  MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
//  MZ_PARAM_ERROR if the input parameters are bogus.
//  MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). 
// MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
// The caller must mz_free() the returned block when it's no longer needed.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. 
These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. 
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. 
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. 
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. 
// tdefl's compression state structure. All buffers are inline (no heap
// allocation inside tdefl itself); with the default (non-TDEFL_LESS_MEMORY)
// enums above the struct is several hundred KB, so callers typically
// allocate it on the heap rather than the stack.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;  // optional output callback
  void *m_pPut_buf_user;                   // user pointer handed to callback
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output is delivered to this
// callback, and the user should call the tdefl_compress_buffer() API for
// compression. (NOTE(review): the original comment said "If NULL" here,
// contradicting the next sentence; the callback path is the non-NULL case.)
// If pBut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a // non-NULL tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't // defined, because it uses some of its macros. #ifndef MINIZ_NO_ZLIB_APIS // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? 
(a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

// Fix: the previous MINIZ_USE_UNALIGNED_LOADS_AND_STORES fast path read
// 16/32-bit values through casted byte pointers, which is undefined behavior
// in C (strict-aliasing violation plus potentially misaligned access). The
// byte-wise form below is well-defined, endian-independent, and produces the
// identical little-endian value; modern compilers fuse it into a single load
// on targets that permit unaligned access, so nothing is lost.
#define MZ_READ_LE16(p)                      \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p)                             \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |        \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API's

// Computes the Adler-32 checksum of buf_len bytes at ptr, continuing from
// `adler`. Call with ptr == NULL (or pass MZ_ADLER32_INIT) to start a new
// checksum. Input is consumed in 5552-byte blocks — the largest count for
// which the unreduced 32-bit sums cannot overflow — and s1/s2 are reduced
// modulo 65521 (largest prime < 2^16) only once per block.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    // 8-way unrolled inner loop for the bulk of each block.
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}

// Karl Malbrain's compact CRC-32.
// See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/

// Computes the CRC-32 (zlib polynomial, reflected) of buf_len bytes at ptr,
// continuing from `crc`. Call with ptr == NULL (or pass MZ_CRC32_INIT) to
// start fresh. Uses a 16-entry nibble table (64 bytes) rather than the usual
// 1KB byte table — per the reference above, trading a little speed for a
// much smaller cache footprint.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac,
      0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
      0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr) return MZ_CRC32_INIT;
  crcu32 = ~crcu32;  // standard CRC-32 pre-inversion
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    // Fold in the low nibble, then the high nibble.
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
  }
  return ~crcu32;  // post-inversion
}

// Frees a block returned by miniz's heap-allocating helpers.
void mz_free(void *p) { MZ_FREE(p); }

#ifndef MINIZ_NO_ZLIB_APIS

// Default zlib-style allocator callbacks, installed when the caller leaves
// pStream->zalloc / zfree NULL.
// NOTE(review): items * size could overflow size_t for adversarial values;
// the internal callers only request a single fixed-size object.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
//  (void)opaque, (void)address, (void)items, (void)size;
//  return MZ_REALLOC(address, items * size);
//}

const char *mz_version(void) { return MZ_VERSION; }

// zlib-compatible deflateInit(): deflate method, default window bits,
// mem_level 9, default strategy.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}

// zlib-compatible deflateInit2(). Only MZ_DEFLATED and
// +/-MZ_DEFAULT_WINDOW_BITS are accepted; mem_level is range-checked
// ([1,9]) for zlib compatibility but otherwise unused here.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);

  if (!pStream) return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved
= 0;
  pStream->total_in = 0;
  pStream->total_out = 0;

  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;

  // Allocate the (large) compressor state through the stream's allocator so
  // the caller's zalloc/zfree pair owns it.
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}

// zlib-compatible deflateReset(): re-initializes the existing compressor
// with its original flags so the stream can be reused without reallocation.
int mz_deflateReset(mz_streamp pStream) {
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
             ((tdefl_compressor *)pStream->state)->m_flags);
  return MZ_OK;
}

// zlib-compatible deflate(). Consumes as much of next_in as possible and
// writes as much compressed data to next_out as possible, updating the
// avail/total counters and the running adler checksum.
// MZ_PARTIAL_FLUSH is treated as MZ_SYNC_FLUSH.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;

  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;

  // Once the underlying compressor reports DONE, only MZ_FINISH is a valid
  // follow-up; anything else cannot make progress.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ?
MZ_STREAM_END : MZ_BUF_ERROR;

  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;

    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);

    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;

    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;  // output buffer full — caller must drain and call again
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR;  // Can't make forward progress without some input.
    }
  }
  return mz_status;
}

// zlib-compatible deflateEnd(): releases the compressor state via the
// stream's zfree callback.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

// Worst-case compressed size for source_len input bytes.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  (void)pStream;
  // This is really over conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking works.)
  return MZ_MAX(128 + (source_len * 110) / 100,
                128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}

// One-shot compression (zlib compress2()). On entry *pDest_len holds the
// capacity of pDest; on success it receives the compressed size.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int status;
  mz_stream stream;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK) return status;

  status = mz_deflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_deflateEnd(&stream);
    // MZ_OK here means the output buffer was too small for the whole stream.
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }

  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}

// One-shot compression at the default level (zlib compress()).
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}

mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}

// Internal inflate stream state: the raw tinfl decompressor plus a
// TINFL_LZ_DICT_SIZE-byte ring dictionary (indexed modulo the size below)
// used when output is streamed out incrementally.
typedef struct {
  tinfl_decompressor m_decomp;
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;

// zlib-compatible inflateInit2(). window_bits must be
// +/-MZ_DEFAULT_WINDOW_BITS; a negative value selects raw deflate (no zlib
// header parsing — see the m_window_bits test in mz_inflate()).
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pDecomp;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;

  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                             sizeof(inflate_state));
  if (!pDecomp) return MZ_MEM_ERROR;

  pStream->state = (struct mz_internal_state *)pDecomp;

  tinfl_init(&pDecomp->m_decomp);
  pDecomp->m_dict_ofs = 0;
  pDecomp->m_dict_avail = 0;
  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pDecomp->m_first_call = 1;
  pDecomp->m_has_flushed = 0;
  pDecomp->m_window_bits = window_bits;

  return MZ_OK;
}

int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

int
// zlib-compatible inflate(). Accepts flush values 0, MZ_SYNC_FLUSH and
// MZ_FINISH (MZ_PARTIAL_FLUSH is mapped to MZ_SYNC_FLUSH). Decompressed data
// is staged through the state's ring dictionary except on the fast
// single-call MZ_FINISH path.
mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;

  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;

  pState = (inflate_state *)pStream->state;
  // Positive window_bits means a zlib header/trailer is expected.
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;

  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;

  // Once MZ_FINISH has been requested, every later call must also use it.
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);

  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file. Decompress
    // directly into the caller's buffer, bypassing the dictionary.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;

    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;

    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);

    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;

    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;

  // First drain any decompressed bytes still parked in the dictionary from a
  // previous call.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }

  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;

    // Decompress into the ring dictionary, then copy out as much as fits in
    // the caller's output buffer.
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;

    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);

    pState->m_dict_avail = (mz_uint)out_bytes;

    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

    if (status < 0)
      return MZ_DATA_ERROR;  // Stream is corrupted (there could be some
                             // uncompressed data left in the output dictionary -
                             // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR;  // Signal caller that we can't make forward progress
                            // without supplying more input or by setting flush
                            // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed data
      // when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }

  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}

// zlib-compatible inflateEnd(): releases the decompressor state via the
// stream's zfree callback.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

// One-shot decompression (zlib uncompress()). On entry *pDest_len holds the
// capacity of pDest; on success it receives the decompressed size.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));

  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;

  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;

  status = mz_inflateInit(&stream);
  if (status != MZ_OK) return status;

  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    // BUF_ERROR with all input consumed means the stream was truncated.
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ?
MZ_DATA_ERROR : status;
  }

  *pDest_len = stream.total_out;
  return mz_inflateEnd(&stream);
}

// Maps an MZ_* status code to a short human-readable description, or NULL if
// the code is unknown.
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = {{MZ_OK, ""},
                       {MZ_STREAM_END, "stream end"},
                       {MZ_NEED_DICT, "need dictionary"},
                       {MZ_ERRNO, "file error"},
                       {MZ_STREAM_ERROR, "stream error"},
                       {MZ_DATA_ERROR, "data error"},
                       {MZ_MEM_ERROR, "out of memory"},
                       {MZ_BUF_ERROR, "buf error"},
                       {MZ_VERSION_ERROR, "version error"},
                       {MZ_PARAM_ERROR, "parameter error"}};
  mz_uint i;
  for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
    if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
  return NULL;
}

#endif // MINIZ_NO_ZLIB_APIS

// ------------------- Low-level Decompression (completely independent from all
// compression API's)

#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)

// The tinfl decompressor is structured as a coroutine: TINFL_CR_RETURN stores
// a resume point in r->m_state and returns to the caller, and the switch
// opened by TINFL_CR_BEGIN jumps straight back to that `case` label on the
// next call. TINFL_CR_FINISH closes the switch.
#define TINFL_CR_BEGIN  \
  switch (r->m_state) { \
    case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do {                                       \
    status = result;                         \
    r->m_state = state_index;                \
    goto common_exit;                        \
    case state_index:;                       \
  }                                          \
  MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do {                                               \
    for (;;) {                                       \
      TINFL_CR_RETURN(state_index, result);          \
    }                                                \
  }                                                  \
  MZ_MACRO_END
#define TINFL_CR_FINISH }

// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
// Reads one byte into c. When the input buffer is exhausted: with
// TINFL_FLAG_HAS_MORE_INPUT set, the coroutine yields NEEDS_MORE_INPUT until
// the caller supplies more; without it, the stream is padded with 0's (see
// the TODO above).
#define TINFL_GET_BYTE(state_index, c)                                 \
  do {                                                                 \
    if (pIn_buf_cur >= pIn_buf_end) {                                  \
      for (;;) {                                                       \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {                \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
          if (pIn_buf_cur < pIn_buf_end) {                             \
            c = *pIn_buf_cur++;                                        \
            break;                                                     \
          }                                                            \
        } else {                                                       \
          c = 0;                                                       \
          break;                                                       \
        }                                                              \
      }                                                                \
    } else                                                             \
      c = *pIn_buf_cur++;                                              \
  }                                                                    \
  MZ_MACRO_END

// Appends whole bytes to bit_buf until it holds at least n bits.
#define TINFL_NEED_BITS(state_index, n)            \
  do {                                             \
    mz_uint c;                                     \
    TINFL_GET_BYTE(state_index, c);                \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
    num_bits += 8;                                 \
  } while (num_bits < (mz_uint)(n))

// Discards the low n bits of bit_buf, refilling first if needed.
#define TINFL_SKIP_BITS(state_index, n) \
  do {                                  \
    if (num_bits < (mz_uint)(n)) {      \
      TINFL_NEED_BITS(state_index, n);  \
    }                                   \
    bit_buf >>= (n);                    \
    num_bits -= (n);                    \
  }                                     \
  MZ_MACRO_END

// Extracts the low n bits of bit_buf into b, refilling first if needed.
#define TINFL_GET_BITS(state_index, b, n) \
  do {                                    \
    if (num_bits < (mz_uint)(n)) {        \
      TINFL_NEED_BITS(state_index, n);    \
    }                                     \
    b = bit_buf & ((1 << (n)) - 1);       \
    bit_buf >>= (n);                      \
    num_bits -= (n);                      \
  }                                       \
  MZ_MACRO_END

// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
// NOTE: unlike the other tinfl macros this one deliberately ends with the
// do/while's own trailing semicolon rather than MZ_MACRO_END — it is always
// expanded as a full statement inside TINFL_HUFF_DECODE, and its `break`s
// target this do/while loop.
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff)                     \
  do {                                                                 \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
    if (temp >= 0) {                                                   \
      code_len = temp >> 9;                                            \
      if ((code_len) && (num_bits >= code_len)) break;                 \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) {                    \
      code_len = TINFL_FAST_LOOKUP_BITS;                               \
      do {                                                             \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while ((temp < 0) && (num_bits >= (code_len + 1)));            \
      if (temp >= 0) break;                                            \
    }                                                                  \
    TINFL_GET_BYTE(state_index, c);                                    \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                     \
    num_bits += 8;                                                     \
  } while (num_bits < 15);

// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = 
pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; 
counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, 
TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = 
s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while ((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } 
  // Stream fully decoded (and trailing zlib Adler-32 consumed, if requested):
  // park the coroutine permanently in the DONE state.
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);

  TINFL_CR_FINISH

common_exit:
  // Spill the coroutine's register-cached state back into the decompressor
  // struct so the next call can resume exactly where this one stopped.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  // Report input consumed / output produced on this call.
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    // Fold the bytes just produced into the running Adler-32 (x8 unrolled;
    // 5552 is the largest block size needing no intermediate modulo).
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.

// Decompresses an in-memory buffer into a heap buffer that is grown
// geometrically on demand. On success returns the allocated buffer (caller
// must free it with the matching allocator) and stores its length in
// *pOut_len; returns NULL on any failure.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    // All input is already present, so HAS_MORE_INPUT is cleared; passing the
    // whole output buffer enables the faster non-wrapping output path.
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      // NEEDS_MORE_INPUT is fatal here: we already supplied everything.
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE) break;
    // Output capacity exhausted: double it (minimum 128 bytes) and retry.
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}

// Decompresses an in-memory buffer into a caller-provided fixed-size buffer.
// Returns the number of bytes written on success, or
// TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on any error.
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  tinfl_init(&decomp);
  status =
      tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                       (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
                       (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
                                       : out_buf_len;
}

// Decompresses an in-memory buffer, handing each chunk of output to
// pPut_buf_func through an internally allocated circular LZ dictionary.
// Returns 1 on success, 0 on failure; *pIn_buf_size is updated to the number
// of input bytes actually consumed.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict,
        pDict + dict_ofs, &dst_buf_size,
        (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                   TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    if ((dst_buf_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    // Advance the circular dictionary write position (size is a power of 2).
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}

// ------------------- Low-level Compression (independent from all decompression
// API's)

// Purposely making these tables static for faster init and thread safety.
// Match length (3..258, indexed by len-3) -> DEFLATE length symbol (257..285).
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285};

// Match length (indexed by len-3) -> number of extra bits for its length
// symbol. Note len 258 (last entry) is symbol 285 and carries 0 extra bits.
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};

// Match distance 1..512 (indexed by dist-1) -> DEFLATE distance symbol 0..17.
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
    8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};

// Match distance 1..512 (indexed by dist-1) -> extra bits for its symbol.
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};

// Distance >= 513, indexed by (dist >> 8) -> distance symbol 18..29.
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};

// Distance >= 513, indexed by (dist >> 8) -> extra bits for its symbol.
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};

// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. 
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

// Clamps a code-length histogram so no code exceeds max_code_size, then
// repairs the Kraft sum (sum of 2^(max-len)) until it is exactly 2^max,
// keeping the code set decodable.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  // Fold all over-long codes down to max_code_size.
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  // While the Kraft sum is oversubscribed, lengthen one shorter code
  // (splitting it into two longer ones) and shorten one max-length code.
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

// Builds an optimal (or, for static_table, the fixed DEFLATE) canonical
// Huffman table: fills d->m_huff_code_sizes[table_num][] and
// d->m_huff_codes[table_num][] (codes stored bit-reversed, LSB-first).
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Code sizes were pre-filled by the caller; just histogram them.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Gather the used symbols, sort them by frequency, and compute optimal
    // code lengths, limited to code_size_limit bits.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Reassign lengths to symbols: pSyms is sorted by increasing frequency,
    // so walk it backwards handing out the shortest lengths first.
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Canonical code assignment, then bit-reverse each code for LSB-first
  // emission.
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}

// Appends the low 'l' bits of 'b' to the output bit buffer, flushing complete
// bytes to d->m_pOutput_buf (writes are dropped once the buffer is full; the
// caller checks d->m_pOutput_buf against the end pointer afterwards).
#define TDEFL_PUT_BITS(b, l)                                   \
  do {                                                         \
    mz_uint bits = b;                                          \
    mz_uint len = l;                                           \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                     \
    d->m_bit_buffer |= (bits << d->m_bits_in);                 \
    d->m_bits_in += len;                                       \
    while (d->m_bits_in >= 8) {                                \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)             \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);     \
      d->m_bit_buffer >>= 8;                                   \
      d->m_bits_in -= 8;                                       \
    }                                                          \
  }                                                            \
  MZ_MACRO_END

// Flushes a pending run of repeated non-zero code sizes, emitting either the
// literal sizes or the DEFLATE "repeat previous" symbol 16 (RFC 1951).
#define TDEFL_RLE_PREV_CODE_SIZE()                                             \
  {                                                                            \
    if (rle_repeat_count) {                                                    \
      if (rle_repeat_count < 3) {                                              \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                      \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);            \
        while (rle_repeat_count--)                                             \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;         \
      } else {                                                                 \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 16;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_repeat_count - 3);                                  \
      }                                                                        \
      rle_repeat_count = 0;                                                    \
    }                                                                          \
  }

// Flushes a pending run of zero code sizes, using symbols 17 (3-10 zeros) or
// 18 (11-138 zeros) when the run is long enough (RFC 1951).
#define TDEFL_RLE_ZERO_CODE_SIZE()                                             \
  {                                                                            \
    if (rle_z_count) {                                                         \
      if (rle_z_count < 3) {                                                   \
        d->m_huff_count[2][0] =                                                \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);                  \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0;  \
      } else if (rle_z_count <= 10) {                                          \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 17;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_z_count - 3);                                       \
      } else {                                                                 \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 18;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_z_count - 11);                                      \
      }                                                                        \
      rle_z_count = 0;                                                         \
    }                                                                          \
  }

// Transmission order of the code-length-code sizes (RFC 1951 section 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

// Emits the header of a dynamic-Huffman DEFLATE block: optimizes the
// literal/length and distance tables, RLE-packs their code sizes, builds the
// code-length-code table, and writes all three to the bit stream.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;

  // The end-of-block symbol (256) is always emitted exactly once.
  d->m_huff_count[0][256] = 1;

  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);

  // Trim trailing unused codes (HLIT >= 257, HDIST >= 1 per the spec).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;

  // Concatenate both size arrays and RLE-pack them into packed_code_sizes,
  // counting code-length-code symbol frequencies as we go.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;

  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {  // max run length for symbol 18
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {  // max run length for symbol 16
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }

  // Code-length codes themselves are limited to 7 bits.
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);

  TDEFL_PUT_BITS(2, 2);  // BTYPE = 10 (dynamic Huffman)

  TDEFL_PUT_BITS(num_lit_codes - 257, 5);   // HLIT
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);    // HDIST

  // HCLEN: count trailing zero entries in swizzled transmission order.
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);

  // Emit the RLE-packed code sizes; symbols >= 16 carry a repeat count with
  // 2, 3 or 7 extra bits respectively.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}

// Emits the header of a static-Huffman DEFLATE block using the fixed code
// lengths defined by RFC 1951 (8/9/7/8 for literals, 5 for distances).
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];

  for (i = 0; i <= 143; ++i) *p++ = 8;
  for (; i <= 255; ++i) *p++ = 9;
  for (; i <= 279; ++i) *p++ = 7;
  for (; i <= 287; ++i) *p++ = 8;

  memset(d->m_huff_code_sizes[1], 5, 32);

  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);

  TDEFL_PUT_BITS(1, 2);  // BTYPE = 01 (static Huffman)
}

// mz_bitmasks[n] == (1 << n) - 1, i.e. a mask of the low n bits.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
// Fast path: Huffman-encodes the recorded LZ codes using a 64-bit bit buffer
// flushed with unaligned 8-byte stores. Returns MZ_FALSE on output overflow.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;

// Accumulate-only variant of TDEFL_PUT_BITS; the buffer is flushed once per
// LZ code below, so at most ~64 bits can be pending at any time.
#define TDEFL_PUT_BITS_FAST(b, l)                \
  {                                              \
    bit_buffer |= (((mz_uint64)(b)) << bits_in); \
    bits_in += (l);                              \
  }

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    // flags is reloaded from the stream every 8 codes; bit 0 set => match.
    if (flags == 1) flags = *pLZ_codes++ | 0x100;

    if (flags & 1) {
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);

      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;

      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal; opportunistically emit up to two more literals from the same
      // flag byte before flushing the bit buffer.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);

      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);

        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }

    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;

    // Flush all complete bytes with one unaligned 64-bit store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }

#undef TDEFL_PUT_BITS_FAST

  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;

  // Drain any residual bits through the slow bounds-checked path.
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }

  // End-of-block symbol.
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable path: Huffman-encodes the recorded LZ codes one at a time with the
// bounds-checked TDEFL_PUT_BITS macro. Returns MZ_FALSE on output overflow.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);

      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }

  // End-of-block symbol.
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
        // MINIZ_HAS_64BIT_REGISTERS

// Writes one complete compressed block: header (static or dynamic) followed
// by the Huffman-coded LZ stream.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

// Flushes the accumulated LZ codes as one DEFLATE block, choosing between
// raw/static/dynamic encodings, then copies (or hands off) the produced bytes
// and resets per-block state. Returns the number of output bytes still
// pending delivery, or a negative tdefl status on callback failure.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      use_raw_block =
          ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
          (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Compress straight into the caller's buffer when it is big enough;
  // otherwise stage through the internal output buffer.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;

  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;

  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;

  // Finalize the current (possibly partial) LZ flag byte.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);

  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);  // zlib CMF/FLG header (RFC 1950)
    TDEFL_PUT_BITS(0x01, 8);
  }

  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);  // BFINAL bit

  // Remember the output position so we can roll back if compression expands.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;

  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));

  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    TDEFL_PUT_BITS(0, 2);  // BTYPE = 00 (stored)
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);  // byte-align
    }
    // LEN then NLEN (one's complement), per the stored-block format.
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }

  if (flush) {
    if (flush == TDEFL_FINISH) {
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        // Trailing Adler-32, big-endian.
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Non-final flush: emit an empty stored block to byte-align the stream.
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }

  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);

  // Reset per-block symbol counts and LZ code buffer state.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);

  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;

  // Deliver the produced bytes: via callback, via copy into the caller's
  // (possibly too-small) buffer, or — if we compressed in place — by simply
  // advancing the output offset.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        // Leftover bytes stay staged; the caller drains them later.
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }

  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Searches the hash-chained dictionary for the longest match at lookahead_pos,
// comparing two bytes at a time via unaligned 16-bit loads. On success updates
// *pMatch_dist/*pMatch_len (only when a match longer than *pMatch_len exists).
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01: the two bytes a candidate must have at offset match_len-1 to beat
  // the current best; s01: first two bytes of the source.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                            \
  next_probe_pos = d->m_next[probe_pos];                                       \
  if ((!next_probe_pos) ||                                                     \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))       \
    return;                                                                    \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                        \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    p = s;
    probe_len = 32;
    // Compare up to 32 word-pairs (64 bytes >= TDEFL_MAX_MATCH_LEN).
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full comparison without a mismatch: maximal match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable byte-at-a-time variant of tdefl_find_match (same contract as the
// unaligned version above).
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // A candidate must match these two bytes (at offsets match_len and
  // match_len-1) to possibly beat the current best match.
  mz_uint8 c0 = d->m_dict[pos + match_len],
           c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                      \
  next_probe_pos = d->m_next[probe_pos];                                 \
  if ((!next_probe_pos) ||                                               \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return;                                                              \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                  \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                        \
      (d->m_dict[probe_pos + match_len - 1] == c1))                      \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  /* Compressor state is mirrored into locals for the hot loop and written
   * back before every tdefl_flush_block() call and on exit. */
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    /* Copy input into the circular dictionary; the region below
     * TDEFL_MAX_MATCH_LEN-1 is mirrored past the end so matches can run off
     * the wrap point without masking every access. */
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      /* Hash the next 3 bytes (single-probe level-1 hash). */
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        /* Extend the match 8 bytes per iteration, 32 iterations max. */
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          /* Too short (or a 3-byte match too far away to be worth it) —
           * emit a literal instead. */
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          /* Emit a match: 1 length byte + 2 distance bytes, flag bit set. */
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        /* No trigram hit — literal. */
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      /* LZ code buffer nearly full — sync state back and flush a block. */
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    /* Fewer than 4 bytes left in the lookahead — flush them as literals. */
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  /* Write the mirrored locals back before returning. */
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
/* Append one literal byte to the LZ code buffer and bump its frequency. */
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  d->m_huff_count[0][lit]++;
}
/* Append a (length, distance) match to the LZ code buffer (1 length byte +
 * 2 distance bytes, flag bit set) and bump both symbol frequencies. */
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
                    s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
/* Normal-speed compressor: hash-chain match finding with optional lazy
 * (one-match lookahead) parsing. Returns MZ_FALSE on flush failure. */
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      /* Rolling 2-byte hash seed; each new byte is folded in below. */
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        /* Mirror the low bytes past the window end (see compress_fast). */
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      /* Cold start: too little data to hash — insert byte by byte. */
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      /* RLE mode: only look for a run of the previous byte (distance 1). */
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    /* Reject 3-byte matches that are too far away, self-overlapping start
     * positions, and (in filter mode) short matches. */
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      /* Lazy parsing: compare this position's match with the deferred one. */
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      /* Defer the match one position to see if a longer one follows. */
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
/* Copy any pending block output into the user's output buffer and report how
 * much input was consumed. Returns DONE only when finished AND fully drained. */
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
                      d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  return (d->m_finished && !d->m_output_flush_remaining) ?
                                                          TDEFL_STATUS_DONE
                                                        : TDEFL_STATUS_OKAY;
}
/* Main streaming entry point: compress *pIn_buf_size bytes from pIn_buf into
 * pOut_buf (sizes updated in place to bytes consumed/produced). Exactly one
 * of callback-mode (m_pPut_buf_func) or buffer-mode (pOut_buf) must be used. */
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  /* Parameter validation: callback-mode and buffer-mode are mutually
   * exclusive; also reject calls after an error or after FINISH was begun. */
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  /* The fast path requires: single probe, greedy parsing, and none of the
   * filter/raw/RLE modes. */
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  /* Keep the Adler-32 of the raw input up to date (for the zlib trailer). */
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      /* Full flush resets the dictionary so the next block is independent. */
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
/* Convenience wrapper for callback-mode compression (no output buffer). */
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
/* Initialize a compressor. flags packs the probe count (low 12 bits) plus
 * the TDEFL_* option bits; pPut_buf_func may be NULL for buffer mode. */
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
/* Returns the status of the most recent tdefl_compress() call. */
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
/* Returns the running Adler-32 of all input compressed so far. */
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
/* One-shot: compress an in-memory buffer, delivering output through the
 * supplied callback. Allocates a temporary compressor on the heap. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len,
                                                  TDEFL_FINISH) ==
                            TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}
/* Growable (or fixed, when m_expandable is false) output sink used by the
 * one-shot helpers below. */
typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;
} tdefl_output_buffer;
/* tdefl_put_buf_func_ptr that appends to a tdefl_output_buffer, doubling the
 * capacity as needed (minimum 128 bytes). Fails if fixed and full. */
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}
/* One-shot: compress into a freshly heap-allocated buffer; caller frees the
 * returned pointer. *pOut_len receives the compressed size.
 * NOTE: returns MZ_FALSE (i.e. 0 == NULL) when pOut_len is NULL — quirky but
 * equivalent to returning NULL. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
/* One-shot: compress into a caller-supplied fixed buffer; returns the
 * compressed size, or 0 on failure (including output buffer too small). */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}
#ifndef MINIZ_NO_ZLIB_APIS
/* Probe counts indexed by compression level 0..10. */
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
/* Translate zlib-style (level, window_bits, strategy) into tdefl flag bits. */
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif  // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
    defined(__MINGW64__)
/* MSVC / MinGW-w64: wrap the *_s secure CRT variants so the MZ_FOPEN/MZ_FREOPEN
 * macros have the classic fopen/freopen signatures. */
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
/* 32-bit MinGW: 64-bit tell/seek via the ftello64/fseeko64 extensions. */
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
/* TinyCC: plain C89 stdio only — note ftell/fseek here are NOT 64-bit, so
 * archives over 2 GB are not supported on this compiler. */
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
/* glibc with explicit LFS64 interfaces (fopen64/ftello64/stat64). */
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
/* Generic POSIX fallback: ftello/fseeko are 64-bit where off_t is 64-bit. */
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif  // #ifdef _MSC_VER
#endif  // #ifdef MINIZ_NO_STDIO
/* ASCII-only tolower, avoids locale-dependent <ctype.h> behavior. */
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,

  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};

// Growable array of fixed-size elements; backs the in-memory central
// directory and its index tables.
typedef struct {
  void *m_p;
  size_t m_size, m_capacity;
  mz_uint m_element_size;
} mz_zip_array;

// Reader/writer internal state: raw central dir bytes, per-entry offsets,
// an optional filename-sorted index, and the backing file or memory blob.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;
  mz_zip_array m_central_dir_offsets;
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile;
  void *m_pMem;
  size_t m_mem_size;
  size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]

// Frees the array's storage via the archive's allocator and zeroes it.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(mz_zip_array));
}

// Grows the backing store to hold at least min_new_capacity elements; when
// 'growing' is set the capacity doubles so repeated pushes stay amortized O(1).
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity) new_capacity *= 2;
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}

// Ensures capacity (without changing m_size).
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  if (new_capacity > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}

// Sets m_size, growing the backing store if needed; new elements are
// uninitialized.
static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size,
                    mz_uint growing) {
  if (new_size > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
      return MZ_FALSE;
  }
  pArray->m_size = new_size;
  return MZ_TRUE;
}

// Reserves room for n more elements beyond the current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

// Appends n elements, growing the array as needed.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t orig_size = pArray->m_size;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
         pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Converts packed MS-DOS time/date fields to a time_t (interpreted as local
// time; DOS times have 2-second resolution).
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1;
  tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;
  tm.tm_mday = dos_date & 31;
  tm.tm_hour = (dos_time >> 11) & 31;
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_sec = (dos_time << 1) & 62;
  return mktime(&tm);
}

// Converts a time_t into packed MS-DOS time/date fields (local time).
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  // MSVC: use the re-entrant secure variant and fail closed on error.
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif

#ifndef MINIZ_NO_STDIO
// Reads pFilename's modification time and returns it as DOS time/date fields.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif  // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Sets a file's access/modification times; returns MZ_TRUE on success.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  return !utime(pFilename, &t);
}
#endif  // #ifndef MINIZ_NO_TIME
#endif  // #ifndef MINIZ_NO_STDIO

// Common reader setup: installs default allocators, switches the archive to
// READING mode, and allocates the zeroed internal state.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// Case-insensitive "less than" over two central-directory filenames; used by
// the heap sort that builds the sorted filename index.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets,
      mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  // Compare lowercased bytes up to the shorter length; ties break on length.
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END

// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  int start = (size - 2) >> 1, end;
  // Phase 1: heapify (sift-down from the last internal node).
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }

  // Phase 2: repeatedly move the max to the end and restore the heap.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}

// Locates the end-of-central-directory record (scanning backwards from EOF),
// validates it, reads the whole central directory into memory, sanity-checks
// every header, and builds the per-entry offset index (plus the sorted index
// unless MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY is set).
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up after scanning the maximum possible EOCD comment distance.
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Step back with 3 bytes of overlap so a signature split across reads
    // is still found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;

  // Multi-disk archives are not supported.
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;

  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;

  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;

  pZip->m_central_directory_file_ofs = cdir_ofs;

  if (pZip->m_total_files) {
    mz_uint i, n;

    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;

    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }

    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;

    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes signal zip64, which is rejected here.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size =
               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }

  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);

  return MZ_TRUE;
}

// Opens a reader whose archive bytes come through the caller-installed
// m_pRead callback; 'size' is the total archive size in bytes.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

// Read callback for in-memory archives: bounded memcpy out of the blob.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t s = (file_ofs >= pZip->m_archive_size)
                 ? 0
                 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}

// Opens an archive that already lives in memory; no copy is made, so pMem
// must outlive the reader.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Read callback for stdio-backed archives; seeks only when the file position
// differs from the requested offset.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Opens an archive from a file on disk.
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile) return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif  // #ifndef MINIZ_NO_STDIO

mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  return pZip ? pZip->m_total_files : 0;
}

// Returns a pointer to file_index's central directory header bytes, or NULL
// when the archive/index/mode is invalid.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}

// True when the entry's general-purpose bit 0 (traditional encryption) is set.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  mz_uint m_bit_flag;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  return (m_bit_flag & 1);
}

// Heuristically decides whether an entry is a directory: trailing '/' in the
// name, or the DOS directory bit in the external attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }
  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0) return MZ_TRUE;
  return MZ_FALSE;
}

// Fills pStat with every field of file_index's central directory record,
// including (truncated) filename and comment copies.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}

// Copies the entry's filename into pFilename (truncating to the buffer) and
// returns the full filename length + 1 (i.e. the buffer size needed).
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

// Length-bounded string equality; case-insensitive unless
// MZ_ZIP_FLAG_CASE_SENSITIVE is set.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
  return MZ_TRUE;
}

// Three-way, case-insensitive compare of a central-dir entry's filename
// against pR; used by the binary search.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  return (pL == pE) ?
      (int)(l_len - r_len) : (l - r);
}

// Binary search over the filename-sorted index; returns the file index or -1.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir,
                                              pCentral_dir_offsets, file_index,
                                              pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

// Finds an entry by name (and optional comment), honoring the IGNORE_PATH and
// CASE_SENSITIVE flags; returns the file index or -1 when not found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: the sorted index can be used only for plain, case-insensitive,
  // full-path, comment-less lookups.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  // Slow path: linear scan over all central directory records.
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip everything up to the last path separator before comparing.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

// Extracts one entry into a caller-provided buffer; stored data is copied
// directly, deflated data is inflated with tinfl. For file-backed archives a
// user-supplied staging buffer may be used instead of a temporary allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                                       read_buf_size, read_buf_ofs = 0,
                                       read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;

  if ((buf_size) && (!pBuf)) return MZ_FALSE;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size) return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }

  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);

  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size) return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  // Inflate loop: refill the staging buffer as needed and decompress straight
  // into the caller's (non-wrapping) output buffer.
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0)); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; out_buf_ofs += out_buf_size; } while (status == TINFL_STATUS_NEEDS_MORE_INPUT); if (status == TINFL_STATUS_DONE) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf)) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_mem_no_alloc( mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size); } mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0); } mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags) { return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0); } void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags) { mz_uint64 comp_size, uncomp_size, alloc_size; const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index); void *pBuf; if (pSize) *pSize = 0; if (!p) return NULL; comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? 
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // Skip past the fixed local header plus its variable-length filename/extra
  // fields to reach the start of this entry's compressed data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  // Sanity check: the compressed data must lie entirely within the archive.
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    // Archive lives in memory: read the compressed bytes in place, no copy.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    // Archive is accessed through the read callback: allocate a bounded
    // staging buffer and stream the compressed data through it.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) { #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > 0xFFFFFFFF)) #endif return MZ_FALSE; if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size) status = TINFL_STATUS_FAILED; else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size); cur_file_ofs += file_stat.m_comp_size; out_buf_ofs += file_stat.m_comp_size; comp_remaining = 0; } else { while (comp_remaining) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) file_crc32 = (mz_uint32)mz_crc32( file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail); if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; out_buf_ofs += read_buf_avail; comp_remaining -= read_buf_avail; } } } else { tinfl_decompressor inflator; tinfl_init(&inflator); if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE))) status = TINFL_STATUS_FAILED; else { do { mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1)); if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) { read_buf_avail = MZ_MIN(read_buf_size, comp_remaining); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail) { status = TINFL_STATUS_FAILED; break; } cur_file_ofs += read_buf_avail; comp_remaining -= read_buf_avail; read_buf_ofs = 0; } in_buf_size = 
(size_t)read_buf_avail; status = tinfl_decompress( &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0); read_buf_avail -= in_buf_size; read_buf_ofs += in_buf_size; if (out_buf_size) { if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size) { status = TINFL_STATUS_FAILED; break; } file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size); if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) { status = TINFL_STATUS_FAILED; break; } } } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT)); } } if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) { // Make sure the entire file was decompressed, and check its CRC. if ((out_buf_ofs != file_stat.m_uncomp_size) || (file_crc32 != file_stat.m_crc32)) status = TINFL_STATUS_FAILED; } if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf); return status == TINFL_STATUS_DONE; } mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags); } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n) { (void)ofs; return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque); } mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags) { mz_bool status; mz_zip_archive_file_stat file_stat; MZ_FILE *pFile; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; pFile = MZ_FOPEN(pDst_filename, 
"wb");
  if (!pFile) return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // fclose can fail (e.g. buffered data flushed to a full disk), so its
  // result is checked too.
  if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  // On success, stamp the output file with the archived modification time.
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO

// Shuts down a reader: releases the central directory arrays, closes any
// archive file handle, frees the internal state, and marks the archive
// invalid. Returns MZ_FALSE if pZip is not a valid open reader.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;

  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    // Detach the state first so the archive never points at freed memory.
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO

    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;

  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Locate pArchive_filename in the archive and extract it to pDst_filename.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0) return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Store a 16-bit value in little-endian byte order (ZIP headers are LE).
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
// Store a 32-bit value in little-endian byte order.
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

// Initializes a zip writer whose output begins at offset existing_size.
// Requires the write callback to be set and the archive to be in the
// invalid (unused) mode.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) ||
(pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }

  // Fall back to the default heap allocator callbacks where unset.
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// Write callback used when the archive is built in a growable heap block:
// grows the backing allocation as needed and copies the data at file_ofs.
// Returns n on success, 0 on failure.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
  // Reject empty writes and sizes a 32-bit size_t could not address.
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Grow geometrically (doubling) to keep amortized cost linear.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size) new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size =
(size_t)new_size; return n; } mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size) { pZip->m_pWrite = mz_zip_heap_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning))) { if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, initial_allocation_size))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_mem_capacity = initial_allocation_size; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning) { MZ_FILE *pFile; pZip->m_pWrite = mz_zip_file_write_func; pZip->m_pIO_opaque = pZip; if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE; if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) { mz_zip_writer_end(pZip); return MZ_FALSE; } pZip->m_pState->m_pFile = pFile; if (size_to_reserve_at_beginning) { mz_uint64 cur_ofs = 0; char buf[4096]; MZ_CLEAR_OBJ(buf); do { size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) { mz_zip_writer_end(pZip); return MZ_FALSE; } cur_ofs += n; size_to_reserve_at_beginning -= n; } while (size_to_reserve_at_beginning); } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename) { 
mz_zip_internal_state *pState; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; // No sense in trying to write to an archive that's already at the support max // size if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; pState = pZip->m_pState; if (pState->m_pFile) { #ifdef MINIZ_NO_STDIO pFilename; return MZ_FALSE; #else // Archive is being read from stdio - try to reopen as writable. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; if (!pFilename) return MZ_FALSE; pZip->m_pWrite = mz_zip_file_write_func; if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) { // The mz_zip_archive is now in a bogus state because pState->m_pFile is // NULL, so just close it. mz_zip_reader_end(pZip); return MZ_FALSE; } #endif // #ifdef MINIZ_NO_STDIO } else if (pState->m_pMem) { // Archive lives in a memory block. Assume it's from the heap that we can // resize using the realloc callback. if (pZip->m_pIO_opaque != pZip) return MZ_FALSE; pState->m_mem_capacity = pState->m_mem_size; pZip->m_pWrite = mz_zip_heap_write_func; } // Archive is being read via a user provided read function - make sure the // user has specified a write function too. else if (!pZip->m_pWrite) return MZ_FALSE; // Start writing new files at the archive's current central directory // location. 
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;

  return MZ_TRUE;
}

// Adds the contents of a memory buffer to the archive with default
// parameters (no comment, no caller-supplied size/CRC).
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}

// Bookkeeping handed to the tdefl output callback while compressing a file
// into the archive.
typedef struct {
  mz_zip_archive *m_pZip;           // destination archive
  mz_uint64 m_cur_archive_file_ofs; // next write offset in the archive
  mz_uint64 m_comp_size;            // compressed bytes emitted so far
} mz_zip_writer_add_state;

// tdefl output callback: append len compressed bytes to the archive and
// advance the running offset/size counters.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

// Fills pDst with a ZIP local directory header (all fields little-endian).
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // Version needed to extract: 2.0 when a compression method is used,
  // 0 for stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || 
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  if (*pArchive_name == '/') return MZ_FALSE;
  while (*pArchive_name) {
    if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
    pArchive_name++;
  }
  return MZ_TRUE;
}

// Number of zero pad bytes needed after the current archive end so the next
// write lands on the configured m_file_offset_alignment boundary (0 when no
// alignment was requested).
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 n;
  if (!pZip->m_file_offset_alignment) return 0;
  n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - n) &
         (pZip->m_file_offset_alignment - 1);
}

// Writes n zero bytes to the archive at cur_file_ofs, in chunks of up to 4 KB.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char buf[4096];
  memset(buf, 0, MZ_MIN(sizeof(buf), n));
  while (n) {
    mz_uint32 s = MZ_MIN(sizeof(buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
      return MZ_FALSE;
    cur_file_ofs += s;
    n -= s;
  }
  return MZ_TRUE;
}

// Full-control variant of mz_zip_writer_add_mem: the caller may supply a file
// comment, and (with MZ_ZIP_FLAG_COMPRESSED_DATA) pre-compressed data along
// with its uncompressed size and CRC-32.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64
local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. (A good idea if we're doing // an in-place modification.) 
if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, 
buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return 
MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, 
cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? 
TDEFL_NO_FLUSH : TDEFL_FINISH);
          if (status == TDEFL_STATUS_DONE) {
            result = MZ_TRUE;
            break;
          } else if (status != TDEFL_STATUS_OKAY)
            break;
        }
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        if (!result) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        comp_size = state.m_comp_size;
        cur_archive_file_ofs = state.m_cur_archive_file_ofs;
        method = MZ_DEFLATED;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
    }
    MZ_FCLOSE(pSrc_file);
    pSrc_file = NULL;
    // no zip64 support yet
    if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
      return MZ_FALSE;
    // Go back and patch the (previously zero-filled) local directory header
    // now that the compressed size and CRC are known.
    if (!mz_zip_writer_create_local_dir_header(
            pZip, local_dir_header, (mz_uint16)archive_name_size, 0,
            uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time,
            dos_date))
      return MZ_FALSE;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                       local_dir_header,
                       sizeof(local_dir_header)) != sizeof(local_dir_header))
      return MZ_FALSE;
    if (!mz_zip_writer_add_to_central_dir(
            pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0,
            pComment, comment_size, uncomp_size, comp_size, uncomp_crc32,
            method, 0, dos_time, dos_date, local_dir_header_ofs,
            ext_attributes))
      return MZ_FALSE;
    pZip->m_total_files++;
    pZip->m_archive_size = cur_archive_file_ofs;
    return MZ_TRUE;
  }
#endif  // #ifndef MINIZ_NO_STDIO

  // Copies the file with index `file_index` from `pSource_zip` into the
  // archive being written in `pZip`, verbatim: the raw (still-compressed)
  // file data is streamed through a bounce buffer without recompression,
  // and the source's central directory record is duplicated with only the
  // local-header offset patched. Returns MZ_FALSE on any I/O/validation
  // failure (archive state is rolled back for central-dir failures).
  mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                            mz_zip_archive *pSource_zip,
                                            mz_uint file_index) {
    mz_uint n, bit_flags, num_alignment_padding_bytes;
    mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
    mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                                sizeof(mz_uint32) - 1) /
                               sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
    mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
    size_t orig_central_dir_size;
    mz_zip_internal_state *pState;
    void *pBuf;
    const mz_uint8 *pSrc_central_header;
    if ((!pZip) || (!pZip->m_pState) ||
        (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
      return MZ_FALSE;
    if (NULL == (pSrc_central_header =
                     mz_zip_reader_get_cdh(pSource_zip, file_index)))
      return MZ_FALSE;
    pState = pZip->m_pState;
    num_alignment_padding_bytes =
        mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
    // no zip64 support yet
    if ((pZip->m_total_files == 0xFFFF) ||
        ((pZip->m_archive_size + num_alignment_padding_bytes +
          MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
         0xFFFFFFFF))
      return MZ_FALSE;
    // Locate and validate the source entry's local header.
    cur_src_file_ofs =
        MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
    cur_dst_file_ofs = pZip->m_archive_size;
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                             pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
      return MZ_FALSE;
    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
      return MZ_FALSE;
    cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
    // Pad the destination so the local header lands on the configured
    // alignment boundary, then write the copied local header.
    if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                   num_alignment_padding_bytes))
      return MZ_FALSE;
    cur_dst_file_ofs += num_alignment_padding_bytes;
    local_dir_header_ofs = cur_dst_file_ofs;
    if (pZip->m_file_offset_alignment) {
      MZ_ASSERT((local_dir_header_ofs &
                 (pZip->m_file_offset_alignment - 1)) == 0);
    }
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                       MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
      return MZ_FALSE;
    cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
    // Total raw payload = filename + extra field (from the local header)
    // plus the compressed data size (from the central directory).
    n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
        MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    comp_bytes_remaining =
        n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
    // Bounce buffer: at least 16 bytes (room for a data descriptor), at
    // most MZ_ZIP_MAX_IO_BUF_SIZE.
    if (NULL ==
        (pBuf = pZip->m_pAlloc(
             pZip->m_pAlloc_opaque, 1,
             (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                            MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                   comp_bytes_remaining)))))
      return MZ_FALSE;
    while (comp_bytes_remaining) {
      n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                          comp_bytes_remaining);
      if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                               pBuf, n) != n) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
        return MZ_FALSE;
      }
      cur_src_file_ofs += n;
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
        return MZ_FALSE;
      }
      cur_dst_file_ofs += n;
      comp_bytes_remaining -= n;
    }
    bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
    if (bit_flags & 8) {
      // Copy data descriptor
      if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                               pBuf, sizeof(mz_uint32) * 4) !=
          sizeof(mz_uint32) * 4) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
        return MZ_FALSE;
      }
      // 0x08074b50 = optional data-descriptor signature; descriptor is 4
      // dwords with it, 3 without.
      n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
        return MZ_FALSE;
      }
      cur_src_file_ofs += n;
      cur_dst_file_ofs += n;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    // no zip64 support yet
    if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
    // Duplicate the source central-dir record, patching only the
    // local-header offset; on any push_back failure, resize back to
    // orig_central_dir_size to keep the central dir consistent.
    orig_central_dir_size = pState->m_central_dir.m_size;
    memcpy(central_header, pSrc_central_header,
           MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
    MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                  local_dir_header_ofs);
    if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                                MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
      return MZ_FALSE;
    n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
        MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
        MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
    if (!mz_zip_array_push_back(
            pZip, &pState->m_central_dir,
            pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
      mz_zip_array_resize(pZip, &pState->m_central_dir,
                          orig_central_dir_size, MZ_FALSE);
      return MZ_FALSE;
    }
    if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
    n = (mz_uint32)orig_central_dir_size;
    if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
      mz_zip_array_resize(pZip, &pState->m_central_dir,
                          orig_central_dir_size, MZ_FALSE);
      return MZ_FALSE;
    }
    pZip->m_total_files++;
    pZip->m_archive_size = cur_dst_file_ofs;
    return MZ_TRUE;
  }

  // Writes the central directory followed by the end-of-central-directory
  // record, flushing any stdio stream, and moves the writer into the
  // "finalized" state. No zip64, so totals must fit 16/32 bits.
  mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
    mz_zip_internal_state *pState;
    mz_uint64 central_dir_ofs, central_dir_size;
    mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
    if ((!pZip) || (!pZip->m_pState) ||
        (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
      return MZ_FALSE;
    pState = pZip->m_pState;
    // no zip64 support yet
    if ((pZip->m_total_files > 0xFFFF) ||
        ((pZip->m_archive_size + pState->m_central_dir.m_size +
          MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
      return MZ_FALSE;
    central_dir_ofs = 0;
    central_dir_size = 0;
    if (pZip->m_total_files) {
      // Write central directory
      central_dir_ofs = pZip->m_archive_size;
      central_dir_size = pState->m_central_dir.m_size;
      pZip->m_central_directory_file_ofs = central_dir_ofs;
      if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                         pState->m_central_dir.m_p,
                         (size_t)central_dir_size) != central_dir_size)
        return MZ_FALSE;
      pZip->m_archive_size += central_dir_size;
    }
    // Write end of central directory record
    MZ_CLEAR_OBJ(hdr);
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
    MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                  pZip->m_total_files);
    MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS,
                  pZip->m_total_files);
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                       sizeof(hdr)) != sizeof(hdr))
      return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
    if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
      return MZ_FALSE;
#endif  // #ifndef MINIZ_NO_STDIO
    pZip->m_archive_size += sizeof(hdr);
    pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
    return MZ_TRUE;
  }

  // Finalizes a heap-backed archive and transfers ownership of the
  // in-memory buffer to the caller via *pBuf/*pSize (caller must free it
  // with the zip's allocator); the internal state no longer owns it.
  mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip,
                                              void **pBuf, size_t *pSize) {
    if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
    if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
    if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
    *pBuf = pZip->m_pState->m_pMem;
    *pSize = pZip->m_pState->m_mem_size;
    pZip->m_pState->m_pMem = NULL;
    pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
    return MZ_TRUE;
  }

  // Releases all writer resources (central dir arrays, stdio handle, heap
  // buffer if still owned) and invalidates the archive. Valid in both the
  // writing and finalized states.
  mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
    mz_zip_internal_state *pState;
    mz_bool status = MZ_TRUE;
    if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) ||
        (!pZip->m_pFree) ||
        ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
         (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
      return MZ_FALSE;
    pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif  // #ifndef MINIZ_NO_STDIO
    if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
      pState->m_pMem = NULL;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
    pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
    return status;
  }

#ifndef MINIZ_NO_STDIO
  // One-shot convenience: adds a memory buffer as a file entry to the zip
  // at `pZip_filename`, creating the archive if it doesn't exist or
  // appending (rewriting the central dir) if it does. Always attempts to
  // finalize so the archive stays valid; deletes a newly created archive
  // on failure. Returns MZ_TRUE on success.
  mz_bool mz_zip_add_mem_to_archive_file_in_place(
      const char *pZip_filename, const char *pArchive_name, const void *pBuf,
      size_t buf_size, const void *pComment, mz_uint16 comment_size,
      mz_uint level_and_flags) {
    mz_bool status, created_new_archive = MZ_FALSE;
    mz_zip_archive zip_archive;
    struct MZ_FILE_STAT_STRUCT file_stat;
    MZ_CLEAR_OBJ(zip_archive);
    if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
    if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
        ((comment_size) && (!pComment)) ||
        ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
      return MZ_FALSE;
    if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
    if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
      // Create a new archive.
      if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
        return MZ_FALSE;
      created_new_archive = MZ_TRUE;
    } else {
      // Append to an existing archive.
      if (!mz_zip_reader_init_file(
              &zip_archive, pZip_filename,
              level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
        return MZ_FALSE;
      if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
        mz_zip_reader_end(&zip_archive);
        return MZ_FALSE;
      }
    }
    status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf,
                                      buf_size, pComment, comment_size,
                                      level_and_flags, 0, 0);
    // Always finalize, even if adding failed for some reason, so we have a
    // valid central directory. (This may not always succeed, but we can try.)
    if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
    if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
    if ((!status) && (created_new_archive)) {
      // It's a new archive and something went wrong, so just delete it.
      int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
      (void)ignoredStatus;
    }
    return status;
  }

  // One-shot convenience: extracts the named entry from the zip at
  // `pZip_filename` into a freshly allocated heap buffer (size returned in
  // *pSize). Returns NULL on any failure; caller owns the returned buffer.
  void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                            const char *pArchive_name,
                                            size_t *pSize, mz_uint flags) {
    int file_index;
    mz_zip_archive zip_archive;
    void *p = NULL;
    if (pSize) *pSize = 0;
    if ((!pZip_filename) || (!pArchive_name)) return NULL;
    MZ_CLEAR_OBJ(zip_archive);
    if (!mz_zip_reader_init_file(
            &zip_archive, pZip_filename,
            flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return NULL;
    if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name,
                                                NULL, flags)) >= 0)
      p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize,
                                        flags);
    mz_zip_reader_end(&zip_archive);
    return p;
  }
#endif  // #ifndef MINIZ_NO_STDIO
#endif  // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif  // #ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef __cplusplus
}
#endif

#endif  // MINIZ_HEADER_FILE_ONLY

/* This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
*/

// ---------------------- end of miniz ----------------------------------------

#ifdef __clang__
#pragma clang diagnostic pop
#endif

#ifdef _MSC_VER
#pragma warning(pop)
#endif

}  // namespace miniz
#else

// Reuse MINIZ_LITTLE_ENDIAN macro

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif  // TINYEXR_USE_MINIZ

// static bool IsBigEndian(void) {
//   union {
//     unsigned int i;
//     char c[4];
//   } bint = {0x01020304};
//
//   return bint.c[0] == 1;
//}

// Stores a heap-allocated copy of `msg` into *err when the caller asked
// for an error string (err != NULL). Caller owns the strdup'd buffer.
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (err) {
#ifdef _WIN32
    (*err) = _strdup(msg.c_str());
#else
    (*err) = strdup(msg.c_str());
#endif
  }
}

static const int kEXRVersionSize = 8;

// Byte-wise 2-byte copy (safe for unaligned destinations/sources).
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  dst[0] = src[0];
  dst[1] = src[1];
}

// In-place 16-bit byteswap; no-op on little-endian builds (EXR data is
// little-endian on disk).
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned short tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[1];
  dst[1] = src[0];
#endif
}

// Byte-wise 4-byte copies (int / unsigned / float overloads), safe for
// unaligned pointers and free of strict-aliasing issues.
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

// In-place 32-bit byteswap; no-op on little-endian builds.
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}

#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
  dst[4] = src[4];
  dst[5] = src[5];
  dst[6] = src[6];
  dst[7] = src[7];
}
#endif

// In-place 64-bit byteswap; no-op on little-endian builds.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}

// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// Converts an IEEE half (FP16) to single precision (FP32), handling
// Inf/NaN and denormal inputs. Bit-manipulation technique from the
// rygorous gist referenced above.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// Converts single precision (FP32) to IEEE half (FP16) with full
// rounding, overflow-to-Inf and underflow-to-denormal handling.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RAMDOM_Y    2
//
// #define IMF_NO_COMPRESSION  0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION  2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION  7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

// Reads a NUL-terminated string of at most `len` bytes starting at `ptr`
// into *s. Returns the pointer just past the terminator, or NULL (with *s
// cleared) when no terminator was found within `len` bytes.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NUL ('\0').
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parses one EXR header attribute record (name\0 type\0 uint32-length
// payload) from `marker`/`size`. On success fills name/type/data, stores
// the total record size in *marker_size, and returns true. Every length is
// validated against the remaining `size` before it is consumed.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data,
                          size_t *marker_size, const char *marker,
                          size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);

      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);

      data->resize(1);
      (*data)[0] = '\0';

      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Appends one attribute record (name\0 type\0 little-endian length, then
// `len` payload bytes) to `out` in EXR header layout.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// Parsed form of one entry of the EXR "channels" attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;    // EXR pixel type id (as stored on disk)
  int x_sampling;    // horizontal subsampling factor
  int y_sampling;    // vertical subsampling factor
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

// Aggregated, parsed EXR header state used while decoding an image part.
typedef struct {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  // Resets every field to its zero/default value.
  void clear() {
    channels.clear();
    attributes.clear();

    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;
  }
} HeaderInfo;

// Decodes the payload of the "channels" attribute: a sequence of
// (name\0, pixel_type:int, p_linear:uchar, reserved:uchar[3],
// x_sampling:int, y_sampling:int) records terminated by an empty name.
// Returns false when the payload is truncated (bounds-checked per record).
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len =
        static_cast<tinyexr_int64>(data.size()) -
        (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));

    channels.push_back(info);
  }

  return true;
}

// Encodes `channels` into the on-disk "channels" attribute layout (the
// inverse of ReadChannelInfo), appending the final empty-name terminator.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}

// ZIP-compresses `src` into `dst` (which the caller sized to at least the
// compress bound): first applies OpenEXR's split+delta preprocessing, then
// deflates; falls back to a raw copy when deflate doesn't shrink the data.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    // Even-indexed bytes go to the first half, odd-indexed to the second.
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    // Replace each byte (after the first) with its delta from the previous
    // byte, biased so the result is non-negative.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressZip: inflates `src` and undoes the delta + split
// preprocessing into `dst`. When sizes match, the data was stored raw
// (Issue 40) and is copied verbatim. Returns false on inflate failure.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    // Undo the delta coding: each byte becomes previous + delta - 128.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    // Interleave the two halves back into their original byte order.
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
// static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressable run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressable run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the oncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength. 
// static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; if (0 > (maxLength -= count)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. 
// (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static void DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); assert(ret == static_cast<int>(uncompressed_size)); (void)ret; // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // 
Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
//

// 16-bit modulo-arithmetic wavelet parameters: values wrap in [0, 2^16).
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Forward 16-bit wavelet step with wrap-around arithmetic (see wenc14 for
// the 14-bit variant without the modulo).
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Inverse of wenc16().
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//

// In-place hierarchical 2D Haar transform of an nx-by-ny block.
// `ox`/`oy` are element strides between horizontally/vertically adjacent
// samples; `mx` selects the 14-bit (exact) or 16-bit (modulo) basis.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierachical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//

// Inverse of wav2Encode(); same parameter conventions.
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
//  16-bit Huffman compression and decompression.
//
//  The source code in this file is derived from the 8-bit
//  Huffman compression and decompression routines written
//  by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

// One slot of the fast Huffman decoding table.
struct HufDec {  // short code    long code
  //-------------------------------
  int len : 8;   // code length   0
  int lit : 24;  // lit           p size
  int *p;        // 0             lits
};

// A packed code word stores the bit length in the low 6 bits and the code
// value in the remaining high bits.
inline long long hufLength(long long code) { return code & 63; }

inline long long hufCode(long long code) { return code >> 6; }

// Append `nBits` bits to the output stream; `c`/`lc` hold not-yet-flushed
// bits (lc = number of valid bits in c, LSB aligned).
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c <<= nBits;
  lc += nBits;

  c |= bits;

  while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}

// Read `nBits` bits from the input stream, refilling the accumulator
// byte by byte as needed.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
    lc += 8;
  }

  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}

//
// ENCODING TABLE BUILDING & (UN)PACKING
//

//
// Build a "canonical" Huffman code table:
//	- for each (uncompressed) symbol, hcode contains the length
//	  of the corresponding code (in the compressed data)
//	- canonical codes are computed and stored in hcode
//	- the rules for constructing canonical codes are as follows:
//	  * shorter codes (if filled with zeroes to the right)
//	    have a numerically higher value than longer codes
//	  * for codes with the same length, numerical values
//	    increase with numerical symbol values
//	- because the canonical code table can be constructed from
//	  symbol lengths alone, the code table can be transmitted
//	  without sending the actual code values
//	- see http://www.compressconsult.com/huffman/
//

static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //

  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //

  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i.  Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //

  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}

//
// Compute Huffman codes (based on frq input) and store them in frq:
//	- code structure is : [63:lsb - 6:msb] | [5-0: bit length];
//	- max code length is 58 bits;
//	- codes outside the range [im-iM] have a null length (unused values);
//	- original frequencies are destroyed;
//	- encoding tables are used by hufEncode() and hufBuildDecTable();
//

// Min-heap comparator over pointers into the frequency array (std heap
// primitives build a max-heap, so "greater" yields a min-heap).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};

static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         //  o: min frq index
    int *iM)         //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //

  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //

  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //

  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //

    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //

    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //

        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //

    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //

  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//	- only code lengths, not actual codes, are stored
//	- runs of zeroes are compressed as follows:
//
//	  unpacked		packed
//	  --------------------------------
//	  1 zero		0	(6 bits)
//	  2 zeroes		59
//	  3 zeroes		60
//	  4 zeroes		61
//	  5 zeroes		62
//	  n zeroes (6 or more)	63 n-6	(6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            //  o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Coalesce a run of zero-length (unused) symbols.
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  // Flush any bits still pending in the accumulator.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
//

// Returns false when the packed table would read past `ni` input bytes or
// a zero-run overflows the [im, iM] symbol range.
static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    //  o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    if (p - *pcode > ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//	- short codes (<= HUF_DECBITS) are resolved with a single table access;
//	- long code entry allocations are not optimized, because long codes are
//	  unfrequent;
//	- decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the secondary symbol list by one (copy-and-replace).
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

// Emit one packed (code, length) word to the output bit stream.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCount.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // runCount + 1 explicit copies (runCount counts repeats beyond the
    // first occurrence).
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)
  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  // Flush trailing bits (padded with zeroes on the right).
  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
// #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety if ((out - 1) <= ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // std::cout << "lc 
= " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; 
hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) 
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), 
static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } 
// // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } 
  } else {
    // Unknown compression type attribute value.
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
// Decompresses ZFP-encoded pixel data into `dst` (dst_width x dst_num_lines
// floats per channel, channels stacked vertically).
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  size_t uncompressed_size = dst_width * dst_num_lines * num_channels;

  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    // NOTE(review): execution still falls through to the ZFP decode below
    // after this verbatim copy — confirm whether an early `return true;`
    // was intended here.
    memcpy(dst, src, src_size);
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  // ZFP codes 4x4 blocks; both dimensions must be multiples of 4.
  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  // All channels are treated as a single tall 2-D field:
  // width x (lines * channels).
  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  // Configure the stream according to the compression mode recorded in
  // the EXR header attributes.
  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  // Copy the compressed payload into a buffer big enough for the stream.
  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    // Walk the channel plane in 4x4 tiles; each zfp block decodes 16 floats
    // which are scattered back to their (x, y) positions in `dst`.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
// Compresses `inPtr` (width x num_lines floats per channel) with ZFP into
// `outBuf`; the compressed byte count is written to `*outSize`.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines, int num_channels,
                 const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  // ZFP codes 4x4 blocks; both dimensions must be multiples of 4.
  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, width, num_lines * num_channels);

  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  // Size the output for the worst case before encoding.
  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  outBuf->resize(buf_size);

  // NOTE(review): unlike DecompressZfp there is no zfp_stream_rewind()
  // before encoding — confirm a freshly opened stream starts at offset 0.
  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_field_free(field);

  size_t image_size = width * num_lines;

  for (int c = 0; c < num_channels; c++) {
    // compress 4x4 pixel block.
    // Gather each 4x4 tile of the channel plane into a block and encode it.
    for (int y = 0; y < num_lines; y += 4) {
      for (int x = 0; x < width; x += 4) {
        float fblock[16];
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * width + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }

  // Flush any bits buffered in the stream and report the final size.
  zfp_stream_flush(zfp);
  (*outSize) = zfp_stream_compressed_size(zfp);

  zfp_stream_close(zfp);

  return true;
}

#endif

//
// -----------------------------------------------------------------
//

// TODO(syoyo): Refactor function arguments.
// Decodes one chunk (scanline block or tile) of compressed pixel data into
// the per-channel output images, converting pixel types as requested and
// honoring `line_order` (0 = increasing Y, else decreasing Y).
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(
        static_cast<size_t>(width * num_lines) * pixel_data_size));
    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);

    assert(ret);
    (void)ret;

    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 
0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... 
for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t 
u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... 
for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t 
u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < 
static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
use byte-wise copy for safety.#76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (y + v) * x_stride; } else { outLine += (height - 1 - (y + v)) * x_stride; } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupsed data? 
              // Bounds check failed above: input too short for this pixel.
              return false;
            }

            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);
            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}

// Decodes one tile's worth of pixel data. Computes the tile's actual
// dimensions (edge tiles may be smaller than tile_size_x/y), writes them to
// *width/*height, then delegates to DecodePixelData with the tile geometry.
static void DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
  DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                  compression_type, line_order, (*width), tile_size_y,
                  /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                  (*height), pixel_data_size, num_attributes, attributes,
                  num_channels, channels, channel_offset_list);
}

// Computes, per channel, its byte offset within one interleaved pixel and
// the total bytes per pixel. Returns false on an unknown pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    // Offset of this channel = sum of sizes of the preceding channels.
    (*channel_offset_list)[c] = (*channel_offset);

    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      (*pixel_data_size) += sizeof(unsigned short);
      (*channel_offset) += sizeof(unsigned short);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      (*pixel_data_size) += sizeof(float);
      (*channel_offset) += sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      (*pixel_data_size) += sizeof(unsigned int);
      (*channel_offset) += sizeof(unsigned int);
    } else {
      // ???
      return false;
    }
  }

  return true;
}

// Allocates one image buffer per channel, sized data_width * data_height
// elements of the requested output type. Caller owns the returned array
// and each images[c] buffer (freed with free()).
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);

    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // pixel_data_size += sizeof(unsigned short);
      // channel_offset += sizeof(unsigned short);

      // Alloc internal image for half type.
      // HALF source may be delivered as HALF or widened to FLOAT.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      // pixel_data_size += sizeof(float);
      // channel_offset += sizeof(float);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // pixel_data_size += sizeof(unsigned int);
      // channel_offset += sizeof(unsigned int);
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    } else {
      assert(0);
    }
  }

  return images;
}

// Parses one EXR header starting at `buf` (size bytes available) into
// `info`. For multipart files, sets *empty_header and returns success when
// the terminating empty header is reached. Returns a TINYEXR_* status code;
// `err` (optional) receives human-readable diagnostics.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);

  if (empty_header) {
    (*empty_header) = false;
  }

  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), 
sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); 
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = 
true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." 
<< std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy poiner exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, 
                       std::string *err) {
  // Body of DecodeChunk(): decodes every chunk referenced by `offsets`
  // (already validated to lie inside [0, size)) from the raw file bytes at
  // `head` into `exr_image`, honoring the compression type and the
  // tiled/scanline layout recorded in `exr_header`.
  int num_channels = exr_header->num_channels;

  // Scanlines per compressed block, per the OpenEXR format:
  // NONE/RLE/ZIPS = 1, ZIP = 16, PIZ = 32, ZFP (tinyexr extension) = 16.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // dataWindow is inclusive, hence the +1.
  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height =
      exr_header->data_window[3] - exr_header->data_window[1] + 1;
  size_t num_blocks = offsets.size();

  // Per-channel byte offsets within one interleaved pixel record.
  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                     &channel_offset, num_channels,
                                     exr_header->channels)) {
    if (err) {
      (*err) += "Failed to compute channel layout.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.

  if (exr_header->tiled) {
    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
      exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
          num_channels, exr_header->channels,
          exr_header->requested_pixel_types, exr_header->tile_size_x,
          exr_header->tile_size_y);

      // Tile chunk layout:
      // 16 byte: tile coordinates
      // 4 byte : data size
      // ~      : data(uncompressed or compressed)
      if (offsets[tile_idx] + sizeof(int) * 5 > size) {
        if (err) {
          (*err) += "Insufficient data size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
      const unsigned char *data_ptr =
          reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);

      int tile_coordinates[4];
      memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));

      // @todo{ LoD } — only mip/rip level (0, 0) is supported here.
      if (tile_coordinates[2] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }
      if (tile_coordinates[3] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }

      int data_len;
      memcpy(&data_len, data_ptr + 16,
             sizeof(int));  // 16 = sizeof(tile_coordinates)
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
      if (data_len < 4 || size_t(data_len) > data_size) {
        if (err) {
          (*err) += "Insufficient data length.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      // Move to data addr: 20 = 16 + 4;
      data_ptr += 20;

      tinyexr::DecodeTiledPixelData(
          exr_image->tiles[tile_idx].images,
          &(exr_image->tiles[tile_idx].width),
          &(exr_image->tiles[tile_idx].height),
          exr_header->requested_pixel_types, data_ptr,
          static_cast<size_t>(data_len), exr_header->compression_type,
          exr_header->line_order, data_width, data_height,
          tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
          exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
          static_cast<size_t>(exr_header->num_custom_attributes),
          exr_header->custom_attributes,
          static_cast<size_t>(exr_header->num_channels), exr_header->channels,
          channel_offset_list);

      exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
      exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
      exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
      exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

      exr_image->num_tiles = static_cast<int>(num_tiles);
    }
  } else {  // scanline format
    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
      size_t y_idx = static_cast<size_t>(y);
      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // Scanline chunk layout:
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
        const unsigned char *data_ptr =
            reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else {
          // Clamp the last block to the bottom of the data window.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window[3] + 1));

          int num_lines = end_line_no - line_no;
          // assert(num_lines > 0);
          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;

            // Adjust line_no with data_window.bmin.y
            line_no -= exr_header->data_window[1];

            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == 
             TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // Guard against overflow before the inclusive +1 (Issue 63).
  int data_width = exr_header->data_window[2] - exr_header->data_window[0];
  if (data_width >= std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data window value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_width++;

  int data_height = exr_header->data_window[3] - exr_header->data_window[1];
  if (data_height >= std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_height++;

  if ((data_width < 0) || (data_height < 0)) {
    tinyexr::SetErrorMessage("data window or data height is negative.", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Read offset tables.
  // Block count: explicit chunkCount attribute wins; otherwise derived from
  // the tile grid (tiled) or from scanlines-per-block (scanline), rounding up.
  size_t num_blocks = 0;

  if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
  } else if (exr_header->tiled) {
    // @todo { LoD }
    size_t num_x_tiles = static_cast<size_t>(data_width) /
                         static_cast<size_t>(exr_header->tile_size_x);
    if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
        static_cast<size_t>(data_width)) {
      num_x_tiles++;
    }
    size_t num_y_tiles = static_cast<size_t>(data_height) /
                         static_cast<size_t>(exr_header->tile_size_y);
    if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
        static_cast<size_t>(data_height)) {
      num_y_tiles++;
    }

    num_blocks = num_x_tiles * num_y_tiles;
  } else {
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);

    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
  }

  std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);

  for (size_t y = 0; y < num_blocks; y++) {
    tinyexr::tinyexr_uint64 offset;
    // Issue #81 — make sure the 8-byte read stays inside the buffer.
    if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
      tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);
    if (offset >= size) {
      tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct it.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
  // (offsets are unsigned, so `<= 0` effectively tests for a zero entry.)
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] <= 0) {
      // TODO(syoyo) Report as warning?
      // if (err) {
      //  stringstream ss;
      //  ss << "Incomplete lineOffsets." << std::endl;
      //  (*err) += ss.str();
      //}
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK — one reconstruction pass fixes the whole table.
        break;
      } else {
        tinyexr::SetErrorMessage(
            "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
    }

    return ret;
  }
}

}  // namespace tinyexr

// Loads an EXR file into a tightly packed RGBA float buffer (caller frees
// *out_rgba). Continues on the following lines.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) { // Alpha channel only. 
if (exr_header.tiled) { // todo.implement this } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } FreeEXRHeader(&exr_header); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. 
`memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfoer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Failed to parse EXR version", err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, 
filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, 
header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme 
{ read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; 
c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( 
exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * 
exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned 
char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), 
reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = 
fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if 
(attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { 
deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. 
{ unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. { unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * 
static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char 
// (continuation of FreeEXRErrorMessage; the `const char *msg` parameter is
// declared on the previous line)
// Releases an error string allocated by tinyexr::SetErrorMessage.
// NOTE(review): free(NULL) is a no-op, so the guard is redundant but harmless.
*msg) {
  if (msg) {
    free(reinterpret_cast<void *>(const_cast<char *>(msg)));
  }
  return;
}

// Zero-initializes every field of an EXRHeader so later Parse/Free calls see
// a well-defined state. No-op when given NULL.
void InitEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return;
  }
  memset(exr_header, 0, sizeof(EXRHeader));
}

// Frees all heap memory owned by an EXRHeader: the channel table, both
// pixel-type arrays, each custom attribute value, and the attribute array
// itself. Safe on a partially-populated header since every pointer is
// checked before freeing.
// NOTE(review): the `if (ptr) free(ptr)` guards are redundant (free(NULL) is
// defined to do nothing) but kept byte-identical here.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->channels) {
    free(exr_header->channels);
  }

  if (exr_header->pixel_types) {
    free(exr_header->pixel_types);
  }

  if (exr_header->requested_pixel_types) {
    free(exr_header->requested_pixel_types);
  }

  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    if (exr_header->custom_attributes[i].value) {
      free(exr_header->custom_attributes[i].value);
    }
  }

  if (exr_header->custom_attributes) {
    free(exr_header->custom_attributes);
  }

  return TINYEXR_SUCCESS;
}

// Frees per-channel scanline image buffers and, for tiled images, each
// tile's per-channel buffers, then the containing arrays themselves.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  for (int i = 0; i < exr_image->num_channels; i++) {
    if (exr_image->images && exr_image->images[i]) {
      free(exr_image->images[i]);
    }
  }

  if (exr_image->images) {
    free(exr_image->images);
  }

  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      for (int i = 0; i < exr_image->num_channels; i++) {
        if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
          free(exr_image->tiles[tid].images[i]);
        }
      }
      if (exr_image->tiles[tid].images) {
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}

// Reads `filename` fully into memory and parses its EXR header into
// `exr_header`. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code, setting
// *err with a human-readable message on failure.
// (Function body continues on the next original line.)
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. 
(*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. 
{ const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. 
size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename) { if ((components == 1) || components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. 
if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } const char *err; int ret = SaveEXRImageToFile(&image, &header, outfilename, &err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
for_schedule_dynamic.c
/* * Test for dynamic scheduling with chunk size * Method: caculate how many times the iteration space is dispatched * and judge if each dispatch has the requested chunk size * unless it is the last one. * It is possible for two adjacent chunks are assigned to the same thread * Modifyied by Chunhua Liao */ #include <stdio.h> #include <omp.h> #include <unistd.h> #include <stdlib.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" #define CFDMAX_SIZE 100 int check_for_schedule_dynamic (FILE * logFile) { const int chunk_size = 7; int tid; int tids[CFDMAX_SIZE]; int count = 0; int tmp_count = 0; /*dispatch times*/ int *tmp; /*store chunk size for each dispatch*/ int i; int result = 0; #pragma omp parallel private(tid) shared(tids) { /* begin of parallel */ tid = omp_get_thread_num (); #pragma omp for schedule(dynamic,chunk_size) for (i = 0; i < CFDMAX_SIZE; i++) { tids[i] = tid; } } /* end of parallel */ for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tids[i] != tids[i + 1]) { count++; } } tmp = (int *) malloc (sizeof (int) * (count + 1)); tmp[0] = 1; for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tmp_count > count) { printf ("--------------------\nTestinternal Error: List too small!!!\n--------------------\n"); /* Error handling */ break; } if (tids[i] != tids[i + 1]) { tmp_count++; tmp[tmp_count] = 1; } else { tmp[tmp_count]++; } } /* printf("debug----\n"); for (i = 0; i < CFDMAX_SIZE; ++i) printf("%d ",tids[i]); printf("debug----\n"); */ /* is dynamic statement working? 
*/ for (i = 0; i < count; i++) { if ((tmp[i]%chunk_size)!=0) /*it is possible for 2 adjacent chunks assigned to a same thread*/ { result++; fprintf(logFile,"The intermediate dispatch has wrong chunksize.\n"); /*result += ((tmp[i] / chunk_size) - 1);*/ } } if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size)) { result++; fprintf(logFile,"the last dispatch has wrong chunksize.\n"); } /* for (int i=0;i<count+1;++i) printf("%d\t:=\t%d\n",i+1,tmp[i]); */ return (result==0); } int crosscheck_for_schedule_dynamic (FILE * logFile) { const int chunk_size = 7; int tid; int tids[CFDMAX_SIZE]; int count = 0; int tmp_count = 0; /*dispatch times*/ int *tmp; /*store chunk size for each dispatch*/ int i; int result = 0; #pragma omp parallel private(tid) shared(tids) { /* begin of parallel */ tid = omp_get_thread_num (); #pragma omp for for (i = 0; i < CFDMAX_SIZE; i++) { tids[i] = tid; } } /* end of parallel */ for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tids[i] != tids[i + 1]) { count++; } } tmp = (int *) malloc (sizeof (int) * (count + 1)); tmp[0] = 1; for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tmp_count > count) { printf ("--------------------\nTestinternal Error: List too small!!!\n--------------------\n"); /* Error handling */ break; } if (tids[i] != tids[i + 1]) { tmp_count++; tmp[tmp_count] = 1; } else { tmp[tmp_count]++; } } /* printf("debug----\n"); for (i = 0; i < CFDMAX_SIZE; ++i) printf("%d ",tids[i]); printf("debug----\n"); */ /* is dynamic statement working? */ for (i = 0; i < count; i++) { if ((tmp[i]%chunk_size)!=0) /*it is possible for 2 adjacent chunks assigned to a same thread*/ { result++; fprintf(logFile,"The intermediate dispatch has wrong chunksize.\n"); /*result += ((tmp[i] / chunk_size) - 1);*/ } } if ((tmp[count]%chunk_size)!=(CFDMAX_SIZE%chunk_size)) { result++; fprintf(logFile,"the last dispatch has wrong chunksize.\n"); } /* for (int i=0;i<count+1;++i) printf("%d\t:=\t%d\n",i+1,tmp[i]); */ return (result==0); }
mpm_search_element_utility.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \.
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:        BSD License
//  Kratos default license: kratos/license.txt
//
//  Main authors:    Bodhinanda Chandra
//

#ifndef KRATOS_MPM_SEARCH_ELEMENT_UTILITY
#define KRATOS_MPM_SEARCH_ELEMENT_UTILITY

// System includes

// External includes

// Project includes
#include "includes/define.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/quadrature_points_utility.h"
#include "particle_mechanics_application_variables.h"
#include "geometries/geometry.h"
#include "includes/model_part.h"
#include "pqmpm_partition_utilities.h"

namespace Kratos
{
namespace MPMSearchElementUtility
{
    // Standard types
    typedef std::size_t IndexType;
    typedef std::size_t SizeType;
    typedef Node<3> NodeType;
    typedef typename ModelPart::GeometryType GeometryType;

    // z-component of the cross product of two in-plane vectors (2D determinant).
    // NOTE(review): parameters are taken by value; const references would avoid
    // two array copies per call — confirm before changing.
    inline double CrossProductDet2D(array_1d<double, 3> VectorA,
        array_1d<double, 3> VectorB)
    {
        return (VectorA[0] * VectorB[1] - VectorB[0] * VectorA[1]);
    }

    // Point-in-geometry test. For planar 2D geometries (all nodal z == 0) a
    // cheap "walk around" sign test on edge cross products is used first;
    // otherwise (3D, or a 2D geometry with nonzero z) it falls back to the
    // geometry's own IsInside. On success, LocalCoords is filled only when
    // IsCalcLocalCoords is true.
    inline bool CheckIsInside(const GeometryType& rGeom,
        array_1d<double, 3>& LocalCoords,
        const array_1d<double, 3>& Coords,
        const double Tolerance,
        const bool IsCalcLocalCoords = true)
    {
        bool is_inside = true;
        if (rGeom.Dimension() == 2)
        {
            is_inside = true;
            // Do walk around method
            Vector cross_products(rGeom.PointsNumber());
            for (size_t i = 0; i < rGeom.PointsNumber(); ++i)
            {
                // Non-planar node: the 2D shortcut is invalid, defer to IsInside.
                if (rGeom.Points()[i].Coordinates()[2] != 0.0) {
                    return rGeom.IsInside(Coords, LocalCoords, Tolerance);
                    break; // unreachable after return (kept as-is)
                }
                cross_products[i] = CrossProductDet2D(
                    Coords - rGeom.Points()[i].Coordinates(),
                    rGeom.Points()[(i + 1) % rGeom.PointsNumber()].Coordinates() - rGeom.Points()[i].Coordinates());
            }
            // All edge cross products must share the sign of the first one
            // (within tolerance) for the point to be inside a convex polygon.
            for (size_t i = 1; i < cross_products.size(); ++i)
            {
                if (cross_products[i] * cross_products[0] < -std::abs(Tolerance))
                {
                    is_inside = false;
                    break;
                }
            }
        }
        else return rGeom.IsInside(Coords, LocalCoords, Tolerance);

        if (is_inside) {
            // Recompute via IsInside only when the caller needs local coordinates.
            if (IsCalcLocalCoords) return rGeom.IsInside(Coords, LocalCoords, Tolerance);
            else return true;
        }
        return false;
    }

    // Builds and caches (in GEOMETRY_NEIGHBOURS) the list of background grid
    // geometries sharing at least one node with rGeom. O(elements * nodes^2);
    // intended to run once per geometry. The SetValue is guarded by an omp
    // critical because callers invoke this from parallel loops.
    inline void ConstructNeighbourRelations(GeometryType& rGeom,
        const ModelPart& rBackgroundGridModelPart)
    {
        std::vector<typename Geometry<Node<3>>::Pointer> geometry_neighbours;
        for (IndexType j = 0; j < rBackgroundGridModelPart.NumberOfElements(); j++)
        {
            auto p_geometry_neighbour = (rBackgroundGridModelPart.ElementsBegin() + j)->pGetGeometry();
            if (p_geometry_neighbour->Id() != rGeom.Id()) // dont add the parent as its own neighbour
            {
                for (IndexType n = 0; n < p_geometry_neighbour->size(); n++)
                {
                    for (IndexType k = 0; k < rGeom.size(); k++)
                    {
                        if (rGeom[k].Id() == (*p_geometry_neighbour)[n].Id())
                        {
                            // Prevent duplicate additions
                            bool add_entry = true;
                            for (size_t i = 0; i < geometry_neighbours.size(); i++)
                            {
                                if (geometry_neighbours[i]->Id() == p_geometry_neighbour->Id())
                                {
                                    add_entry = false;
                                    break;
                                }
                            }
                            if (add_entry)
                            {
                                geometry_neighbours.push_back(p_geometry_neighbour);
                            }
                            break;
                        }
                    }
                }
            }
        }
        #pragma omp critical
        rGeom.SetValue(GEOMETRY_NEIGHBOURS, geometry_neighbours);
    }

    // True when explicit time integration requires re-searching this material
    // point: the fix-on-grid-edge flag is active, the quadrature point has a
    // single integration point, and some shape function value is (near) zero,
    // i.e. the point sits on an element edge/face.
    inline bool IsExplicitAndNeedsCorrection(GeometryType::Pointer pQuadraturePoint,
        const ProcessInfo& rProcessInfo)
    {
        if (rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
            if (rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
                if (pQuadraturePoint->IntegrationPointsNumber() == 1) {
                    for (size_t i = 0; i < pQuadraturePoint->ShapeFunctionsValues().size2(); ++i) {
                        if (pQuadraturePoint->ShapeFunctionsValues()(0, i) < std::numeric_limits<double>::epsilon()) return true;
                    }
                }
            }
        }
        return false;
    }

    // Locates the background grid geometry containing point xg. Checks the
    // current parent first, then its (lazily constructed) nodal neighbours.
    // On success sets IsFound and fills rLocalCoords; on failure returns the
    // parent unchanged with IsFound == false.
    inline GeometryType& FindGridGeom(GeometryType& rParentGeom,
        const ModelPart& rBackgroundGridModelPart,
        const double Tolerance,
        const array_1d<double, 3>& xg,
        array_1d<double, 3>& rLocalCoords,
        const ProcessInfo& rProcessInfo,
        bool& IsFound)
    {
        IsFound = false;
        if (CheckIsInside(rParentGeom, rLocalCoords, xg, Tolerance)) {
            IsFound = true;
            return rParentGeom;
        }
        else
        {
            if (!rParentGeom.Has(GEOMETRY_NEIGHBOURS))
                ConstructNeighbourRelations(rParentGeom, rBackgroundGridModelPart);
            auto& geometry_neighbours = rParentGeom.GetValue(GEOMETRY_NEIGHBOURS);
            for (IndexType k = 0; k < geometry_neighbours.size(); ++k)
            {
                if (CheckIsInside(*geometry_neighbours[k], rLocalCoords, xg, Tolerance)) {
                    IsFound = true;
                    return *(geometry_neighbours[k].get());
                }
            }
        }
        return rParentGeom;
    }

    // Recomputes local coordinates of rCoordinates in the quadrature point
    // geometry, then delegates splitting of the master material point into
    // sub-points to the PQMPM partition utilities.
    inline void UpdatePartitionedQuadraturePoint(const ModelPart& rBackgroundGridModelPart,
        const array_1d<double, 3>& rCoordinates,
        Element& rMasterMaterialPoint,
        typename GeometryType::Pointer pQuadraturePointGeometry,
        const double Tolerance)
    {
        KRATOS_TRY;
        array_1d<double, 3> local_coords;
        pQuadraturePointGeometry->IsInside(rCoordinates, local_coords, Tolerance);
        PQMPMPartitionUtilities::PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, rCoordinates,
            local_coords, rMasterMaterialPoint, pQuadraturePointGeometry, Tolerance);
        KRATOS_CATCH("");
    }

    // Fast neighbour-based relocation of material point elements: tries the
    // previous parent grid element and its neighbours via FindGridGeom.
    // Elements not found (or needing explicit edge correction) are appended
    // to rMissingElements for the bin-based fallback search.
    inline void NeighbourSearchElements(const ModelPart& rMPMModelPart,
        const ModelPart& rBackgroundGridModelPart,
        std::vector<typename Element::Pointer>& rMissingElements,
        const double Tolerance)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rMPMModelPart.Elements().size()); ++i) {
            auto element_itr = (rMPMModelPart.ElementsBegin() + i);
            array_1d<double, 3> local_coordinates;
            bool is_found = false;
            std::vector<array_1d<double, 3>> xg;
            element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rBackgroundGridModelPart.GetProcessInfo());

            GeometryType& r_found_geom = FindGridGeom(element_itr->GetGeometry().GetGeometryParent(0),
                rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates,
                rMPMModelPart.GetProcessInfo(), is_found);

            if (is_found)
            {
                const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM))
                    ? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false;
                if (is_pqmpm)
                {
                    // Updates the quadrature point geometry.
                    (*element_itr).GetGeometry().SetGeometryParent(&r_found_geom);
                    PQMPMPartitionUtilities::PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, xg[0],
                        local_coordinates, *element_itr, element_itr->pGetGeometry(), Tolerance);
                }
                else
                {
                    CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
                        element_itr->pGetGeometry(), local_coordinates,
                        element_itr->GetGeometry().IntegrationPoints()[0].Weight(), r_found_geom);
                }
                // Explicit edge case: force the slower bin-based re-search.
                if (IsExplicitAndNeedsCorrection(element_itr->pGetGeometry(), rBackgroundGridModelPart.GetProcessInfo()))
                    is_found = false;
                else {
                    for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j)
                        r_found_geom.Points()[j].Set(ACTIVE);
                }
            }
            if(!is_found) {
                #pragma omp critical
                rMissingElements.push_back(&*element_itr);
            }
        }
    }
    //

    // Neighbour-based relocation of particle-based boundary conditions;
    // mirrors NeighbourSearchElements but only processes conditions flagged
    // BOUNDARY with at least one MPC_COORD integration point.
    inline void NeighbourSearchConditions(const ModelPart& rMPMModelPart,
        const ModelPart& rBackgroundGridModelPart,
        std::vector<typename Condition::Pointer>& rMissingConditions,
        const double Tolerance)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rMPMModelPart.Conditions().size()); ++i) {
            auto condition_itr = rMPMModelPart.Conditions().begin() + i;
            std::vector<array_1d<double, 3>> xg;
            condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo());

            if (xg.size() > 0 && condition_itr->Is(BOUNDARY)) {
                array_1d<double, 3> local_coordinates;
                bool is_found = false;

                GeometryType& r_found_geom = FindGridGeom(condition_itr->GetGeometry(),
                    rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates,
                    rMPMModelPart.GetProcessInfo(), is_found);

                if (is_found) {
                    condition_itr->GetGeometry() = r_found_geom;
                    for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j)
                        r_found_geom[j].Set(ACTIVE);
                }
                else {
                    #pragma omp critical
                    rMissingConditions.push_back(&*condition_itr);
                }
            }
        }
    }

    // True when the explicit edge-fix flag is set and any shape function
    // value is (near) zero, i.e. the material point sits exactly on an
    // element edge — which gives spurious strains in explicit integration.
    inline bool IsFixExplicitAndOnElementEdge(const Vector& N, const ProcessInfo& rProcessInfo)
    {
        if (rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
            if (rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
                // check if MP is exactly on the edge of the element, this gives spurious strains in explicit
                for (SizeType i = 0; i < N.size(); ++i) {
                    if (std::abs(N[i]) < std::numeric_limits<double>::epsilon()) {
                        return true;
                    }
                }
            }
        }
        return false;
    }

    // Fallback bin-based search for elements/conditions the neighbour search
    // could not place. Each thread builds its own BinBasedFastPointLocator.
    // Unlocatable elements/conditions are deactivated and marked TO_ERASE.
    template <std::size_t TDimension>
    void BinBasedSearchElementsAndConditions(ModelPart& rMPMModelPart,
        ModelPart& rBackgroundGridModelPart,
        std::vector<typename Element::Pointer>& rMissingElements,
        std::vector<typename Condition::Pointer>& rMissingConditions,
        const std::size_t MaxNumberOfResults, const double Tolerance)
    {
        const ProcessInfo& r_process_info = rBackgroundGridModelPart.GetProcessInfo();
        bool is_pqmpm = (r_process_info.Has(IS_PQMPM))
            ? r_process_info.GetValue(IS_PQMPM) : false;

        // Search background grid and make element active
        Vector N;
        const int max_result = 1000;

        #pragma omp parallel
        {
            BinBasedFastPointLocator<TDimension> SearchStructure(rBackgroundGridModelPart);
            SearchStructure.UpdateSearchDatabase();
            typename BinBasedFastPointLocator<TDimension>::ResultContainerType results(max_result);

            // Element search and assign background grid
            #pragma omp for
            for (int i = 0; i < static_cast<int>(rMissingElements.size()); ++i) {
                auto element_itr = *(rMissingElements.begin() + i);
                std::vector<array_1d<double, 3>> xg;
                element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rMPMModelPart.GetProcessInfo());
                typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin();
                Element::Pointer pelem;

                // FindPointOnMesh find the background element in which a given point falls and the relative shape functions
                bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);

                if (is_found == true) {
                    if (IsFixExplicitAndOnElementEdge(N, r_process_info) && !is_pqmpm) {
                        // MP is exactly on the edge. Now we give it a little 'nudge'
                        array_1d<double, 3> xg_nudged = array_1d<double, 3>(xg[0]);
                        std::vector<array_1d<double, 3>> mp_vel;
                        element_itr->CalculateOnIntegrationPoints(MP_VELOCITY, mp_vel, rMPMModelPart.GetProcessInfo());
                        // Nudge along the velocity by a small fraction of the time step.
                        xg_nudged += r_process_info[DELTA_TIME] / 1000.0 * mp_vel[0];
                        if (SearchStructure.FindPointOnMesh(xg_nudged, N, pelem, result_begin, MaxNumberOfResults, Tolerance)) {
                            element_itr->SetValuesOnIntegrationPoints(MP_COORD, { xg_nudged }, rMPMModelPart.GetProcessInfo());
                            KRATOS_INFO("MPMSearchElementUtility") << "WARNING: To prevent spurious explicit stresses, Material Point " << element_itr->Id() << " was nudged." << std::endl;
                        }
                        else {
                            // Nudge failed; fall back to the original position.
                            is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);
                            KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Material Point " << element_itr->Id() << " lies exactly on an element edge and may give spurious results." << std::endl;
                        }
                    }
                    pelem->Set(ACTIVE);

                    // NOTE(review): this shadows the outer is_pqmpm computed from
                    // the same ProcessInfo flag — redundant but harmless; confirm
                    // before simplifying.
                    const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM))
                        ? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false;
                    if (is_pqmpm)
                    {
                        // Updates the quadrature point geometry.
                        (*element_itr).GetGeometry().SetGeometryParent((pelem->pGetGeometry().get()));
                        UpdatePartitionedQuadraturePoint(rBackgroundGridModelPart, xg[0],
                            *element_itr, pelem->pGetGeometry(), Tolerance);
                    }
                    else
                    {
                        auto p_quadrature_point_geometry = element_itr->pGetGeometry();
                        array_1d<double, 3> local_coordinates;
                        p_quadrature_point_geometry->PointLocalCoordinates(local_coordinates, xg[0]);
                        CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
                            p_quadrature_point_geometry, local_coordinates,
                            p_quadrature_point_geometry->IntegrationPoints()[0].Weight(), pelem->GetGeometry());
                    }

                    auto& r_geometry = element_itr->GetGeometry();
                    for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
                        r_geometry[j].Set(ACTIVE);
                }
                else {
                    KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point: " << element_itr->Id() << " is failed. Geometry is cleared." << std::endl;

                    element_itr->GetGeometry().clear();
                    element_itr->Reset(ACTIVE);
                    element_itr->Set(TO_ERASE);
                }
            }

            // Condition search and assign background grid
            #pragma omp for
            for (int i = 0; i < static_cast<int>(rMissingConditions.size()); ++i) {
                auto condition_itr = *(rMissingConditions.begin() + i);
                std::vector<array_1d<double, 3>> xg;
                condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo());

                if (xg.size() > 0) {
                    // Only search for particle based BCs!
                    // Grid BCs are still applied on MP_model_part but we don't want to search for them.
                    typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelem;

                    // FindPointOnMesh find the background element in which a given point falls and the relative shape functions
                    bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);

                    if (is_found == true) {
                        condition_itr->GetGeometry() = pelem->GetGeometry();
                        auto& r_geometry = condition_itr->GetGeometry();
                        for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
                            r_geometry[j].Set(ACTIVE);
                    }
                    else {
                        KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point Condition: " << condition_itr->Id() << " is failed. Geometry is cleared." << std::endl;

                        condition_itr->GetGeometry().clear();
                        condition_itr->Reset(ACTIVE);
                        condition_itr->Set(TO_ERASE);
                    }
                }
            }
        }
    }

    // Deactivates every background grid element and all of its nodes, so the
    // subsequent search can re-activate only those hosting material points.
    inline void ResetElementsAndNodes(ModelPart& rBackgroundGridModelPart)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rBackgroundGridModelPart.Elements().size()); ++i) {
            auto element_itr = rBackgroundGridModelPart.Elements().begin() + i;
            auto& r_geometry = element_itr->GetGeometry();
            element_itr->Reset(ACTIVE);
            for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
                r_geometry[j].Reset(ACTIVE);
        }
    }

    /**
     * @brief Search element connectivity for each particle
     * @details A search is performed to know in which grid element the material point falls.
     * If one or more material points fall in the grid element, the grid element is
     * set to be active and its connectivity is associated to the material point
     * element.
     * STEPS:
     * 1) All the elements are set to be INACTIVE
     * 2) A searching is performed and the grid elements which contain at least a MP are set to be ACTIVE
     * 3) Points not resolved by the fast neighbour search fall back to the
     *    bin-based search.
     */
    template<std::size_t TDimension>
    void SearchElement(ModelPart& rBackgroundGridModelPart, ModelPart& rMPMModelPart,
        const std::size_t MaxNumberOfResults, const double Tolerance)
    {
        ResetElementsAndNodes(rBackgroundGridModelPart);

        std::vector<typename Element::Pointer> missing_elements;
        std::vector<typename Condition::Pointer> missing_conditions;

        NeighbourSearchElements(rMPMModelPart, rBackgroundGridModelPart, missing_elements, Tolerance);
        NeighbourSearchConditions(rMPMModelPart, rBackgroundGridModelPart, missing_conditions, Tolerance);

        if (missing_conditions.size() > 0 || missing_elements.size() > 0)
            BinBasedSearchElementsAndConditions<TDimension>(rMPMModelPart,
                rBackgroundGridModelPart, missing_elements, missing_conditions,
                MaxNumberOfResults, Tolerance);
    }
} // end namespace MPMSearchElementUtility
} // end namespace Kratos
#endif // KRATOS_MPM_SEARCH_ELEMENT_UTILITY
swap_buffer.c
#include <stdio.h> #include "wgrib2.h" /* * does a byte swap of 4-byte integers/ieee * * n should be a multiple of 4 * * 3/2008 Public Domain Wesley Ebisuzaki * 7/2015 OpenMP version Wesley Ebisuzaki */ int swap_buffer(unsigned char *buffer, unsigned int n) { unsigned int ii; unsigned char i, j; #pragma omp parallel for private(ii, i, j) for (ii = 0; ii < n; ii += 4) { i = buffer[ii]; j = buffer[ii+1]; buffer[ii] = buffer[ii+3]; buffer[ii+1] = buffer[ii+2]; buffer[ii+2] = j; buffer[ii+3] = i; } return 0; }
11_det_3x3.c
/*
Program : 11
Author  : Gyan
Topic   : Write a C program using OpenMP features to find the
          determinant of a 3x3 matrix.
*/
#include <stdio.h>
#include <omp.h>

int main()
{
    int i, m, D = 0;
    int a[3][3] = { {1, 2, 3},
                    {4, 5, 6},
                    {7, 8, 10} };

    m = omp_get_num_procs();
    omp_set_num_threads(m);

    /* Cofactor expansion along the first row.
     * FIX: the original used shared(a, D) with "D += ...", which is a data
     * race on D across threads; reduction(+:D) gives each thread a private
     * accumulator and combines them safely (a reduction variable must not
     * also appear in a shared clause). */
    #pragma omp parallel for shared(a) private(i) reduction(+:D)
    for (i = 0; i < 3; i++)
        D += a[0][i] * ( a[1][(i+1)%3] * a[2][(i+2)%3]
                       - a[1][(i+2)%3] * a[2][(i+1)%3] );

    /* For this matrix the result is -3. */
    printf("\nDeterminant = %d\n", D);
    return 0;
}
knn.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "cuda_utils.h" #include <faiss/Heap.h> #include <faiss/gpu/GpuDistance.h> #include <faiss/gpu/GpuIndexFlat.h> #include <faiss/gpu/GpuResources.h> #include <faiss/gpu/IndexProxy.h> #include <faiss/gpu/StandardGpuResources.h> #include <iostream> namespace MLCommon { namespace Selection { /** Merge results from several shards into a single result set. * @param n number of elements in search array * @param k number of neighbors returned * @param distances output distance array * @param labels output index array * @param all_distances row-wise stacked array of intermediary knn output distances size nshard * n * k * @param all_labels row-wise stacked array of intermediary knn output indices size nshard * n * k * @param translations label translations to apply, size nshard */ template <class C> void merge_tables(long n, long k, long nshard, float *distances, long *labels, float *all_distances, long *all_labels, long *translations) { if (k == 0) { return; } size_t stride = n * k; #pragma omp parallel { std::vector<int> buf(2 * nshard); int *pointer = buf.data(); int *shard_ids = pointer + nshard; std::vector<float> buf2(nshard); float *heap_vals = buf2.data(); #pragma omp for for (long i = 0; i < n; i++) { // the heap maps values to the shard where they are // produced. 
const float *D_in = all_distances + i * k; const long *I_in = all_labels + i * k; int heap_size = 0; for (long s = 0; s < nshard; s++) { pointer[s] = 0; if (I_in[stride * s] >= 0) faiss::heap_push<C>(++heap_size, heap_vals, shard_ids, D_in[stride * s], s); } float *D = distances + i * k; long *I = labels + i * k; for (int j = 0; j < k; j++) { if (heap_size == 0) { I[j] = -1; D[j] = C::neutral(); } else { // pop best element int s = shard_ids[0]; int &p = pointer[s]; D[j] = heap_vals[0]; I[j] = I_in[stride * s + p] + translations[s]; faiss::heap_pop<C>(heap_size--, heap_vals, shard_ids); p++; if (p < k && I_in[stride * s + p] >= 0) faiss::heap_push<C>(++heap_size, heap_vals, shard_ids, D_in[stride * s + p], s); } } } } }; /** * Search the kNN for the k-nearest neighbors of a set of query vectors * @param input device memory to search as an array of device pointers * @param sizes array of memory sizes * @param n_params size of input and sizes arrays * @param D number of cols in input and search_items * @param search_items set of vectors to query for neighbors * @param n number of items in search_items * @param res_I pointer to device memory for returning k nearest indices * @param res_D pointer to device memory for returning k nearest distances * @param k number of neighbors to query * @param s the cuda stream to use */ template <typename IntType = int> void brute_force_knn(float **input, int *sizes, int n_params, IntType D, float *search_items, IntType n, long *res_I, float *res_D, IntType k, cudaStream_t s) { std::vector<long> *id_ranges = new std::vector<long>(); IntType total_n = 0; for (int i = 0; i < n_params; i++) { if (i < n_params) // if i < sizes[i] id_ranges->push_back(total_n); total_n += sizes[i]; } float *result_D = new float[k * size_t(n)]; long *result_I = new long[k * size_t(n)]; float *all_D = new float[n_params * k * size_t(n)]; long *all_I = new long[n_params * k * size_t(n)]; ASSERT_DEVICE_MEM(search_items, "search items"); 
ASSERT_DEVICE_MEM(res_I, "output index array"); ASSERT_DEVICE_MEM(res_D, "output distance array"); CUDA_CHECK(cudaStreamSynchronize(s)); #pragma omp parallel { #pragma omp for for (int i = 0; i < n_params; i++) { const float *ptr = input[i]; IntType size = sizes[i]; cudaPointerAttributes att; cudaError_t err = cudaPointerGetAttributes(&att, ptr); if (err == 0 && att.device > -1) { CUDA_CHECK(cudaSetDevice(att.device)); CUDA_CHECK(cudaPeekAtLastError()); try { faiss::gpu::StandardGpuResources gpu_res; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); gpu_res.noTempMemory(); gpu_res.setCudaMallocWarning(false); gpu_res.setDefaultStream(att.device, stream); faiss::gpu::bruteForceKnn( &gpu_res, faiss::METRIC_L2, ptr, size, search_items, n, D, k, all_D + (long(i) * k * long(n)), all_I + (long(i) * k * long(n))); CUDA_CHECK(cudaPeekAtLastError()); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaStreamDestroy(stream)); } catch (const std::exception &e) { std::cout << "Exception occurred: " << e.what() << std::endl; } } else { std::stringstream ss; ss << "Input memory for " << ptr << " failed. isDevice?=" << att.devicePointer << ", N=" << sizes[i]; std::cout << "Exception: " << ss.str() << std::endl; } } } merge_tables<faiss::CMin<float, IntType>>( long(n), k, n_params, result_D, result_I, all_D, all_I, id_ranges->data()); MLCommon::updateDevice(res_D, result_D, k * size_t(n), s); MLCommon::updateDevice(res_I, result_I, k * size_t(n), s); delete all_D; delete all_I; delete result_D; delete result_I; }; }; // namespace Selection }; // namespace MLCommon
archive_blake2sp_ref.c
/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include "archive_platform.h" #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "archive_blake2.h" #include "archive_blake2_impl.h" #define PARALLELISM_DEGREE 8 /* blake2sp_init_param defaults to setting the expecting output length from the digest_length parameter block field. In some cases, however, we do not want this, as the output length of these instances is given by inner_length instead. 
*/ static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P ) { int err = blake2s_init_param(S, P); S->outlen = P->inner_length; return err; } static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint32_t offset ) { blake2s_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store32( &P->node_offset, offset ); store16( &P->xof_length, 0 ); P->node_depth = 0; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2sp_init_leaf_param( S, P ); } static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen ) { blake2s_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store32( &P->node_offset, 0 ); store16( &P->xof_length, 0 ); P->node_depth = 1; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2s_init_param( S, P ); } int blake2sp_init( blake2sp_state *S, size_t outlen ) { size_t i; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; if( blake2sp_init_root( S->R, outlen, 0 ) < 0 ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, 0, (uint32_t)i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; return 0; } int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen ) { size_t i; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; if( blake2sp_init_root( S->R, outlen, keylen ) < 0 ) return -1; for( i = 0; i < 
PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen ) { const unsigned char * in = (const unsigned char *)pin; size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; size_t i; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE) #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; in__ += i * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = left + inlen; return 0; } int blake2sp_final( blake2sp_state *S, void *out, size_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; size_t i; if(out == NULL || outlen < S->outlen) { return -1; } for( i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2S_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES; if( left > BLAKE2S_BLOCKBYTES ) left = 
BLAKE2S_BLOCKBYTES; blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left ); } blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES ); } for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( S->R, out, S->outlen ); } int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; blake2s_state S[PARALLELISM_DEGREE][1]; blake2s_state FS[1]; size_t i; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if ( NULL == key && keylen > 0) return -1; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( keylen > BLAKE2S_KEYBYTES ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */ if( keylen > 0 ) { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; in__ += i * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } if( inlen__ > i * BLAKE2S_BLOCKBYTES ) { const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES; const size_t len = left <= BLAKE2S_BLOCKBYTES ? 
left : BLAKE2S_BLOCKBYTES;
      blake2s_update( S[i], in__, len );
    }

    blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
  }

  if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  /* Root absorbs the leaf digests and produces the final output. */
  FS->last_node = 1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );

  return blake2s_final( FS, out, outlen );
}

#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"

/*
 * Self-test driver: checks the one-shot and streaming APIs against the
 * keyed known-answer vectors in blake2-kat.h. Prints "ok" and returns 0
 * on success; prints "error" and returns -1 on the first mismatch.
 */
int main( void )
{
  uint8_t key[BLAKE2S_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;

  /* Deterministic test fixtures: key = 0..KEYBYTES-1, buf = 0..KAT_LENGTH-1. */
  for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  /* Test simple API */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2S_OUTBYTES];
    blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );

    if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
    {
      goto fail;
    }
  }

  /* Test streaming API: every chunk size from 1 to BLOCKBYTES-1. */
  for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2S_OUTBYTES];
      blake2sp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;

      if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
        goto fail;
      }

      /* Feed the message in step-sized chunks, then the remainder. */
      while (mlen >= step) {
        if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
        goto fail;
      }

      if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
        goto fail;
      }
    }
  }

  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
/* ===== nvector_openmpdev.c ===== */
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP DEV implementation * of the NVECTOR module. * -----------------------------------------------------------------*/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <nvector/nvector_openmpdev.h> #include <sundials/sundials_math.h> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */ static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void 
VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/ N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v) { return SUNDIALS_NVEC_OPENMPDEV; } /* ---------------------------------------------------------------------------- * Function to create a new empty vector */ N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length, SUNContext sunctx) { N_Vector v; N_VectorContent_OpenMPDEV content; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV; v->ops->nvclone = N_VClone_OpenMPDEV; v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV; v->ops->nvdestroy = N_VDestroy_OpenMPDEV; v->ops->nvspace = N_VSpace_OpenMPDEV; v->ops->nvgetlength = N_VGetLength_OpenMPDEV; v->ops->nvgetarraypointer = N_VGetHostArrayPointer_OpenMPDEV; v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_OpenMPDEV; v->ops->nvprint = N_VPrint_OpenMPDEV; v->ops->nvprintfile = N_VPrintFile_OpenMPDEV; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV; v->ops->nvconst = N_VConst_OpenMPDEV; v->ops->nvprod = N_VProd_OpenMPDEV; v->ops->nvdiv = N_VDiv_OpenMPDEV; v->ops->nvscale = N_VScale_OpenMPDEV; v->ops->nvabs = N_VAbs_OpenMPDEV; v->ops->nvinv = N_VInv_OpenMPDEV; v->ops->nvaddconst = N_VAddConst_OpenMPDEV; v->ops->nvdotprod = N_VDotProd_OpenMPDEV; v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV; v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV; v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV; v->ops->nvmin = N_VMin_OpenMPDEV; v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV; v->ops->nvl1norm = N_VL1Norm_OpenMPDEV; v->ops->nvcompare = N_VCompare_OpenMPDEV; v->ops->nvinvtest = N_VInvTest_OpenMPDEV; v->ops->nvconstrmask = N_VConstrMask_OpenMPDEV; v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV; v->ops->nvmaxnormlocal = 
N_VMaxNorm_OpenMPDEV; v->ops->nvminlocal = N_VMin_OpenMPDEV; v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV; v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV; v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV; v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMPDEV; /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = length; content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Function to create a new vector */ N_Vector N_VNew_OpenMPDEV(sunindextype length) { N_Vector v; realtype *data; realtype *dev_data; int dev; v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Update ownership */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if (data == NULL) { N_VDestroy(v); return(NULL); } /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if (dev_data == NULL) { N_VDestroy(v); return(NULL); } /* Attach data */ NV_DATA_HOST_OMPDEV(v) = data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a vector with user data component */ N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata, realtype *d_vdata) { N_Vector v; int dev, host; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) 
return(NULL); if (length > 0) { /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Attach data */ NV_OWN_DATA_OMPDEV(v) = SUNFALSE; NV_DATA_HOST_OMPDEV(v) = h_vdata; NV_DATA_DEV_OMPDEV(v) = d_vdata; } return(v); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors. */ N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMPDEV */ void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_OpenMPDEV(N_Vector v) { return NV_LENGTH_OMPDEV(v); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the host. */ realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_HOST_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the device. 
*/ realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_DEV_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to print a vector to stdout */ void N_VPrint_OpenMPDEV(N_Vector x) { N_VPrintFile_OpenMPDEV(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print a vector to outfile */ void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMPDEV(x); xd = NV_DATA_HOST_OMPDEV(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%11.8Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%11.8g\n", xd[i]); #else fprintf(outfile, "%11.8g\n", xd[i]); #endif } fprintf(outfile, "\n"); return; } /* ---------------------------------------------------------------------------- * Function to copy host array into device array */ void N_VCopyToDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Copy array from host to device */ omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * length, 0, 0, dev, host); return; } /* ---------------------------------------------------------------------------- * Function to copy device array into host array */ void N_VCopyFromDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Copy array from device to host */ 
omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * length, 0, 0, host, dev); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Create new vector from existing vector without attaching data */ N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w) { N_Vector v; N_VectorContent_OpenMPDEV content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = NV_LENGTH_OMPDEV(w); content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Create new vector from existing vector and attach data */ N_Vector N_VClone_OpenMPDEV(N_Vector w) { N_Vector v; realtype *data; realtype *dev_data; sunindextype length; int dev; v = NULL; v = N_VCloneEmpty_OpenMPDEV(w); if (v == NULL) return(NULL); length = NV_LENGTH_OMPDEV(w); /* Create data */ if (length > 0) { /* Update ownership flag */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if (data == NULL) { N_VDestroy(v); return(NULL); } /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if (dev_data == NULL) { N_VDestroy(v); return(NULL); } /* Attach data */ NV_DATA_HOST_OMPDEV(v)= data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* 
---------------------------------------------------------------------------- * Destroy vector and free vector memory */ void N_VDestroy_OpenMPDEV(N_Vector v) { int dev; if (v == NULL) return; /* free content */ if (v->content != NULL) { /* free data arrays if they are owned by the vector */ if (NV_OWN_DATA_OMPDEV(v)) { if (NV_DATA_HOST_OMPDEV(v) != NULL) { free(NV_DATA_HOST_OMPDEV(v)); NV_DATA_HOST_OMPDEV(v) = NULL; } if (NV_DATA_DEV_OMPDEV(v) != NULL) { dev = omp_get_default_device(); omp_target_free(NV_DATA_DEV_OMPDEV(v), dev); NV_DATA_DEV_OMPDEV(v) = NULL; } } free(v->content); v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } /* ---------------------------------------------------------------------------- * Get storage requirement for N_Vector */ void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw) { *lrw = NV_LENGTH_OMPDEV(v); *liw = 1; return; } /* ---------------------------------------------------------------------------- * Compute linear combination z[i] = a*x[i]+b*y[i] */ void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z) { sunindextype i, N; realtype c, *xd_dev, *yd_dev, *zd_dev; N_Vector v1, v2; booleantype test; int dev; xd_dev = yd_dev = zd_dev = NULL; if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */ Vaxpy_OpenMPDEV(a,x,y); return; } if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */ Vaxpy_OpenMPDEV(b,y,x); return; } /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) { VSum_OpenMPDEV(x, y, z); return; } /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { v1 = test ? y : x; v2 = test ? 
x : y; VDiff_OpenMPDEV(v2, v1, z); return; } /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin1_OpenMPDEV(c, v1, v2, z); return; } /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */ if ((test = (a == -ONE)) || (b == -ONE)) { c = test ? b : a; v1 = test ? y : x; v2 = test ? x : y; VLin2_OpenMPDEV(c, v1, v2, z); return; } /* Case: a == b */ /* catches case both a and b are 0.0 - user should have called N_VConst */ if (a == b) { VScaleSum_OpenMPDEV(a, x, y, z); return; } /* Case: a == -b */ if (a == -b) { VScaleDiff_OpenMPDEV(a, x, y, z); return; } /* Do all cases not handled above: (1) a == other, b == 0.0 - user should have called N_VScale (2) a == 0.0, b == other - user should have called N_VScale (3) a,b == other, a !=b, a != -b */ N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); yd_dev = NV_DATA_DEV_OMPDEV(y); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]); return; } /* ---------------------------------------------------------------------------- * Assigns constant value to all vector elements, z[i] = c */ void N_VConst_OpenMPDEV(realtype c, N_Vector z) { sunindextype i, N; realtype *zd_dev; int dev; zd_dev = NULL; N = NV_LENGTH_OMPDEV(z); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target is_device_ptr(zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = c; return; } /* ---------------------------------------------------------------------------- * Compute componentwise product z[i] 
= x[i]*y[i] */

void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  /* Operate on the device copies of the data; pointers below are device
     addresses, hence is_device_ptr on the target region. */
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  #pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
  #pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]*yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise division z[i] = x[i]/y[i]
 */

void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* NOTE: no guard against y[i] == 0 here; avoiding zero denominators is
     the caller's responsibility. */
  #pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
  #pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]/yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaler multiplication z[i] = c*x[i]
 */

void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  /* Special cases dispatch to the private helpers declared at the top of
     this file. */
  if (z == x) {  /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMPDEV(c, x);
    return;
  }

  if (c == ONE) {
    VCopy_OpenMPDEV(x, z);
  } else if (c == -ONE) {
    VNeg_OpenMPDEV(x, z);
  } else {
    N      = NV_LENGTH_OMPDEV(x);
    xd_dev = NV_DATA_DEV_OMPDEV(x);
    zd_dev = NV_DATA_DEV_OMPDEV(z);

    /* get default device identifier */
    dev = omp_get_default_device();

    #pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
    #pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      zd_dev[i] =
c*xd_dev[i]; } return; } /* ---------------------------------------------------------------------------- * Compute absolute value of vector components z[i] = SUNRabs(x[i]) */ void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = SUNRabs(xd_dev[i]); return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = 1 / x[i] */ void N_VInv_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = ONE/xd_dev[i]; return; } /* ---------------------------------------------------------------------------- * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b */ void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = xd_dev[i]+b; return; } /* ---------------------------------------------------------------------------- * Computes the dot product of two 
vectors, a = sum(x[i]*y[i]) */

realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *yd_dev;
  int dev;

  xd_dev = yd_dev = NULL;

  sum    = ZERO;
  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Reduce on the device; the scalar result travels back via map(tofrom:sum). */
  #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
  #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += xd_dev[i]*yd_dev[i];
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Computes max norm of a vector
 */

realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype max, *xd_dev;
  int dev;

  max    = ZERO;
  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* max-norm = max_i |x[i]|, computed with an OpenMP max reduction. */
  #pragma omp target map(tofrom:max) is_device_ptr(xd_dev) device(dev)
  #pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
  for (i = 0; i < N; i++) {
    max = SUNMAX(SUNRabs(xd_dev[i]), max);
  }

  return(max);
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a vector
 */

realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  /* sqrt( sum((x[i]*w[i])^2) / N ), delegating the sum to WSqrSumLocal. */
  return(SUNRsqrt(N_VWSqrSumLocal_OpenMPDEV(x, w)/(NV_LENGTH_OMPDEV(x))));
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a masked vector
 */

realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  /* Same as WrmsNorm but only entries with id[i] > 0 contribute to the sum. */
  return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id) / (NV_LENGTH_OMPDEV(x))));
}

/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a vector
 */

realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;

  sum    = ZERO;
  xd_dev
= wd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { sum += SUNSQR(xd_dev[i]*wd_dev[i]); } return(sum); } /* ---------------------------------------------------------------------------- * Computes weighted square sum of a masked vector */ realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id) { sunindextype i, N; realtype sum, *xd_dev, *wd_dev, *idd_dev; int dev; sum = ZERO; xd_dev = wd_dev = idd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); idd_dev = NV_DATA_DEV_OMPDEV(id); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { if (idd_dev[i] > ZERO) { sum += SUNSQR(xd_dev[i]*wd_dev[i]); } } return(sum); } /* ---------------------------------------------------------------------------- * Finds the minimun component of a vector */ realtype N_VMin_OpenMPDEV(N_Vector x) { sunindextype i, N; realtype min, *xd_dev; int dev; xd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(from:min) is_device_ptr(xd_dev) device(dev) #pragma omp teams num_teams(1) { min = xd_dev[0]; #pragma omp distribute parallel for reduction(min:min) schedule(static, 1) for (i = 1; i < N; i++) { min = SUNMIN(xd_dev[i], min); } } return(min); } /* ---------------------------------------------------------------------------- * Computes weighted L2 norm of a vector */ realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w) { 
sunindextype i, N; realtype sum, *xd_dev, *wd_dev; int dev; sum = ZERO; xd_dev = wd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); wd_dev = NV_DATA_DEV_OMPDEV(w); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i < N; i++) { sum += SUNSQR(xd_dev[i]*wd_dev[i]); } return(SUNRsqrt(sum)); } /* ---------------------------------------------------------------------------- * Computes L1 norm of a vector */ realtype N_VL1Norm_OpenMPDEV(N_Vector x) { sunindextype i, N; realtype sum, *xd_dev; int dev; sum = ZERO; xd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target map(tofrom:sum) is_device_ptr(xd_dev) device(dev) #pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1) for (i = 0; i<N; i++) sum += SUNRabs(xd_dev[i]); return(sum); } /* ---------------------------------------------------------------------------- * Compare vector component values to a scaler */ void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); #pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i = 0; i < N; i++) zd_dev[i] = (SUNRabs(xd_dev[i]) >= c) ? 
ONE : ZERO; return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO */ booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z) { sunindextype i, N; realtype *xd_dev, *zd_dev, val; int dev; xd_dev = zd_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); val = ZERO; #pragma omp target map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1) for (i = 0; i < N; i++) { if (xd_dev[i] == ZERO) val = ONE; else zd_dev[i] = ONE/xd_dev[i]; } if (val > ZERO) return (SUNFALSE); else return (SUNTRUE); } /* ---------------------------------------------------------------------------- * Compute constraint mask of a vector */ booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m) { sunindextype i, N; realtype temp; realtype *cd_dev, *xd_dev, *md_dev; int dev; cd_dev = xd_dev = md_dev = NULL; N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); cd_dev = NV_DATA_DEV_OMPDEV(c); md_dev = NV_DATA_DEV_OMPDEV(m); /* get default device identifier */ dev = omp_get_default_device(); temp = ONE; #pragma omp target map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev) #pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1) for (i = 0; i < N; i++) { md_dev[i] = ZERO; if (cd_dev[i] == ZERO) continue; if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) { if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; } continue; } if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) { if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; } } } if (temp == ONE) return (SUNTRUE); else return(SUNFALSE); } /* ---------------------------------------------------------------------------- * Compute minimum componentwise quotient */ realtype 
N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom) { sunindextype i, N; realtype *nd_dev, *dd_dev, min; int dev; nd_dev = dd_dev = NULL; N = NV_LENGTH_OMPDEV(num); nd_dev = NV_DATA_DEV_OMPDEV(num); dd_dev = NV_DATA_DEV_OMPDEV(denom); /* get default device identifier */ dev = omp_get_default_device(); min = BIG_REAL; #pragma omp target map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev) #pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1) for (i = 0; i < N; i++) if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min); return(min); } /* * ----------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------- */ int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z) { int i, dev; realtype to_add; /* temporary variable to hold sum being added in atomic operation */ sunindextype j, N; realtype* zd_dev=NULL; realtype* xd_dev=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VScale */ if (nvec == 1) { N_VScale_OpenMPDEV(c[0], X[0], z); return(0); } /* should have called N_VLinearSum */ if (nvec == 2) { N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMPDEV(z); zd_dev = NV_DATA_DEV_OMPDEV(z); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store X dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); /* * X[0] += c[i]*X[i], i = 1,...,nvec-1 */ if ((X[0] == z) && (c[0] == ONE)) { #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] 
* xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } /* * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1 */ if (X[0] == z) { #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) { #pragma omp teams distribute parallel for schedule(static,1) for (j=0; j<N; j++) zd_dev[j] *= c[0]; } #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] * xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } /* * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1 */ xd_dev = NV_DATA_DEV_OMPDEV(X[0]); #pragma omp target map(to:N,c[:nvec]) \ is_device_ptr(xd_dev, zd_dev) device(dev) { #pragma omp teams distribute parallel for schedule(static, 1) for (j=0; j<N; j++) { zd_dev[j] = c[0] * xd_dev[j]; } } #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] * xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VLinearSum */ if (nvec == 1) { N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to 
     device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] += a[i] * xd_dev[j];
      }
    }
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
    }
  }
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Computes dotprods[i] = <x, Y[i]> for nvec vectors on the device.
   Each team handles one vector; the inner reduction sums its entries.
   Returns 0 on success, -1 if nvec < 1. */
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize dot products */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /* compute multiple dot products */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    yd_dev = yd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += xd_dev[j] * yd_dev[j];
    dotprods[i] += sum;
  }
  free(yd_dev_ptrs);
  return(0);
}

/*
 * -----------------------------------------------------------------
 * vector array operations
 * -----------------------------------------------------------------
 */

/* Computes Z[i] = a*X[i] + b*Y[i] for nvec vector pairs, dispatching to
   specialized kernels for the common coefficient combinations.
   Returns 0 on success, -1 if nvec < 1. */
int N_VLinearSumVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;
  realtype c;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));

  /* Cases:                    */
  /* (1) a == 1.0, b = -1.0,   */
  /* (2) a == -1.0, b == 1.0   */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /* (1) a == 1.0, b == other or 0.0,                        */
  /* (2) a == other or 0.0, b == 1.0                         */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ?
         X : Y;
    return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /* (1) a == -1.0, b != 1.0,   */
  /* (2) a != 1.0, b == -1.0    */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Do all cases not handled above:                               */
  /* (1) a == other, b == 0.0 - user should have called N_VScale   */
  /* (2) a == 0.0, b == other - user should have called N_VScale   */
  /* (3) a,b == other, a !=b, a != -b                              */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* compute linear sum for each vector pair in vector arrays */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Computes Z[i] = c[i]*X[i] (in place when X == Z) for nvec vectors on
   the device.  Returns 0 on success, -1 if nvec < 1. */
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype*
           zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  }

  /*
   * X[i] *= c[i]
   */
  if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          xd_dev[j] *= c[i];
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i] = c[i] * X[i]
   */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c[i] * xd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Sets every entry of each Z[i] to the constant c on the device.
   Returns 0 on success, -1 if nvec < 1. */
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMPDEV(c, Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get device */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0;
i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); /* set each vector in the vector array to a constant */ #pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \ is_device_ptr(zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c; } } free(zd_dev_ptrs); return(0); } int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; realtype* xd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } nrm[i] = SUNRsqrt(sum/N); } } free(wd_dev_ptrs); free(xd_dev_ptrs); return(0); } int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; 
realtype* xd_dev=NULL; realtype* idd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id); return(0); } /* get vector length and mask data array */ N = NV_LENGTH_OMPDEV(X[0]); idd_dev = NV_DATA_DEV_OMPDEV(id); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) { if (idd_dev[j] > ZERO) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } } nrm[i] = SUNRsqrt(sum/N); } } free(xd_dev_ptrs); free(wd_dev_ptrs); return(0); } int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j, dev; sunindextype k, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; int retval; N_Vector* YY; N_Vector* ZZ; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VLinearSum */ if (nsum == 1) { 
      N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }

    /* should have called N_VScaleAddMulti */
    YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }
    retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);
    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device.
     The yd table is flattened: entry i*nsum+j holds Y[j][i]'s data. */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
  }

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        for (j=0; j<nsum; j++) {
          yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            yd_dev[k] += a[j] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      for (j=0; j<nsum; j++) {
        yd_dev = yd_dev_ptrs[i*nsum+j];
        zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
      }
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Computes Z[j] = sum_i c[i]*X[i][j] for each of the nvec output
   vectors, with in-place fast paths when X[0] aliases Z.
   Delegates to simpler kernels for small nvec/nsum.
   Returns 0 on success, -1 on invalid counts. */
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z)
{
  int i; /* vector arrays index in summation [0,nsum) */
  int j; /* vector index in vector array     [0,nvec) */
  sunindextype k; /* element index in vector  [0,N)   */
  sunindextype N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  int dev;
  realtype* ctmp;
  N_Vector* Y;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */
  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination */
    Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }
    N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);
    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }
    N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);
    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMPDEV(nvec,
                                      c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device.
     The xd table is flattened: entry j*nsum+i holds X[i][j]'s data. */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (j=0; j<nvec; j++)
    zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
  for (j=0; j<nvec; j++) {
    for (i=0; i<nsum; i++)
      xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
  }

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1
   */
  if (X[0] == Z) {
    /* NOTE(review): this and the final target region list only zd_dev in
       is_device_ptr although xd_dev is also dereferenced — the branch
       above lists both; confirm the omission is intentional. */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] *= c[0];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1
   */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd_dev = xd_dev_ptrs[j*nsum];
      zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = c[0] * xd_dev[k];
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
  }
  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/*
 * -----------------------------------------------------------------
 * private functions
 * -----------------------------------------------------------------
 */

/* ----------------------------------------------------------------------------
 * Copy vector components into a second vector: z[i] = x[i], on the device.
 */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector sum: z[i] = x[i] + y[i], on the device.
 */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]+yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector difference
 */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y,
                            N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* z[i] = x[i] - y[i] on the device */
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]-yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute the negative of a vector: z[i] = -x[i], on the device.
 */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = -xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaled vector sum: z[i] = c*(x[i] + y[i]), on the device.
 */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = c*(xd_dev[i]+yd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaled vector difference
 */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype
          *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* z[i] = c*(x[i] - y[i]) on the device */
#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = c*(xd_dev[i]-yd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector sum z[i] = a*x[i]+y[i]
 */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute vector difference z[i] = a*x[i]-y[i]
 */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])-yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute special cases of linear sum
 */
static void
Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev;
  int dev;

  xd_dev = yd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* y[i] += a*x[i], with fast paths for a == 1 and a == -1 */
  if (a == ONE) {
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      yd_dev[i] += xd_dev[i];
    return;
  }

  if (a == -ONE) {
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      yd_dev[i] -= xd_dev[i];
    return;
  }

#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    yd_dev[i] += a*xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaled vector x[i] = a*x[i]
 */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
  sunindextype i, N;
  realtype *xd_dev;
  int dev;

  xd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    xd_dev[i] *= a;

  return;
}

/*
 * -----------------------------------------------------------------
 * private functions for special cases of vector array operations
 * -----------------------------------------------------------------
 */

/* Z[i] = X[i] + Y[i] for nvec vector triples, on the device. */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store
     dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] + yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Z[i] = X[i] - Y[i] for nvec vector triples, on the device. */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] - yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Z[i] = c * (X[i] + Y[i]) for nvec vector triples, on the device. */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Z[i] = c * (X[i] - Y[i]) for nvec vector triples, on the device. */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec *
                                        sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Z[i] = a*X[i] + Y[i] for nvec vector triples, on the device. */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Z[i] = a*X[i] - Y[i] for nvec vector triples, on the device. */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a,
                                      N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = (a * xd_dev[j]) - yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}

/* Y[i] += a*X[i] for nvec vector pairs, on the device, with fast
   paths for a == 1 and a == -1. */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  if (a == ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
    {
      for
(i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } if (a == -ONE) { #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] -= xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += a * xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV; v->ops->nvscaleaddmultivectorarray = 
N_VScaleAddMultiVectorArray_OpenMPDEV; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV; /* enable single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_OpenMPDEV; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; /* disable single buffer reduction operations */ v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) { v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMPDEV; } else { v->ops->nvdotprodmulti = NULL; 
v->ops->nvdotprodmultilocal = NULL; } /* return success */ return(0); } int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV; else v->ops->nvwrmsnormvectorarray = NULL; /* return success */ return(0); } int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV; else v->ops->nvwrmsnormmaskvectorarray = NULL; 
/* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); }
GB_unop__identity_fc64_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_fc64_uint16)
// op(A') function: GB (_unop_tran__identity_fc64_uint16)

// C type:   GxB_FC64_t
// A type:   uint16_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint16_t

// type of the C matrix entries
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting: widen the uint16_t input to the real part of a double complex
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each of the anz entries of Ax (uint16_t) to GxB_FC64_t and stores it
// in Cx.  Aliasing Cx == Ax is safe: each iteration reads Ax [p] before
// writing Cx [p], and iterations are independent.
GrB_Info GB (_unop_apply__identity_fc64_uint16)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // # of OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position p holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Positions with Ab [p] == 0 hold no entry and are skipped.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated unary ops and is
// textually included from GB_unop_transpose.c; the GB_* macros above
// parameterize it for this type combination.
GrB_Info GB (_unop_tran__identity_fc64_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
nbody_barnes_hut.c
/*
** nbody_barnes_hut.c - nbody simulation that implements the Barnes-Hut algorithm (O(nlog(n)))
**
**/

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <math.h>
#include <sys/time.h>
#include <assert.h>
#include <unistd.h>

#ifdef DISPLAY
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#endif

#include "ui.h"
#include "nbody.h"
#include "nbody_tools.h"
#include "nbody_alloc.h"

/* NOTE(review): LEVEL_SUBTRACT appears unused in this file — confirm before
 * removing. */
#define LEVEL_SUBTRACT 2

FILE* f_out=NULL;

int nparticles=10;      /* number of particles */
float T_FINAL=1.0;      /* simulation end time */

particle_t*particles;   /* flat array of all particles, allocated in main() */

node_t *root;           /* root of the quad-tree; rebuilt every time step */

/* global per-step statistics, accumulated in move_particle() */
double sum_speed_sq = 0;
double max_acc = 0;
double max_speed = 0;

/* Allocate the node pool and the root of the quad-tree covering the full
 * simulation domain [XMIN,XMAX]x[YMIN,YMAX]. */
void init() {
  init_alloc(4*nparticles);
  root = malloc(sizeof(node_t));
  init_node(root, NULL, XMIN, XMAX, YMIN, YMAX);
}

#ifdef DISPLAY
extern Display *theDisplay;  /* These three variables are required to open the */
extern GC theGC;             /* particle plotting window. They are externally */
extern Window theMain;       /* declared in ui.h but are also required here.   */
#endif

/* compute the force that a particle with position (x_pos, y_pos) and mass 'mass'
 * applies to particle p (accumulated into p->x_force / p->y_force) */
void compute_force(particle_t*p, double x_pos, double y_pos, double mass) {
  double x_sep, y_sep, dist_sq, grav_base;

  x_sep = x_pos - p->x_pos;
  y_sep = y_pos - p->y_pos;
  /* clamp the squared distance to avoid a blow-up when particles overlap */
  dist_sq = MAX((x_sep*x_sep) + (y_sep*y_sep), 0.01);

  /* Use the 2-dimensional gravity rule: F = d * (GMm/d^2) */
  grav_base = GRAV_CONSTANT*(p->mass)*(mass)/dist_sq;

  p->x_force += grav_base*x_sep;
  p->y_force += grav_base*y_sep;
}

/* compute the force that node n acts on particle p, recursing into the
 * quad-tree and approximating distant cells by their center of mass */
void compute_force_on_particle(node_t* n, particle_t *p) {
  if(! n || n->n_particles==0) {
    return;
  }
  if(n->particle) {
    /* only one particle */
    assert(n->children == NULL);

    /*
      If the current node is an external node (and it is not body b),
      calculate the force exerted by the current node on b, and add
      this amount to b's net force.
    */
    compute_force(p, n->x_center, n->y_center, n->mass);
  } else {
    /* There are multiple particles */

    /* Opening criterion: a cell is "far enough" when size/distance is below
     * THRESHOLD.  NOTE(review): the classical Barnes-Hut theta is usually
     * well below 1 (e.g. 0.5); THRESHOLD==2 accepts very coarse
     * approximations — confirm this is intentional. */
#define THRESHOLD 2
    double size = n->x_max - n->x_min; // width of n
    double diff_x = n->x_center - p->x_pos;
    double diff_y = n->y_center - p->y_pos;
    double distance = sqrt(diff_x*diff_x + diff_y*diff_y);

#if BRUTE_FORCE
    /*
      Run the procedure recursively on each of the current node's children.
      --> This result in a brute-force computation (complexity: O(n*n))
    */
    int i;
    for(i=0; i<4; i++) {
      compute_force_on_particle(&n->children[i], p);
    }
#else
    /* Use the Barnes-Hut algorithm to get an approximation */
    if(size / distance < THRESHOLD) {
      /* The particle is far away. Use an approximation of the force */
      compute_force(p, n->x_center, n->y_center, n->mass);
    } else {
      /*
        Otherwise, run the procedure recursively on each of the current
        node's children.
      */
      int i;
      for(i=0; i<4; i++) {
        compute_force_on_particle(&n->children[i], p);
      }
    }
#endif
  }
}

/* kept for reference: serial version of the force pass
void serial_compute_force_in_node(node_t *n) {
  if(!n) return;

  if(n->particle) {
    particle_t*p = n->particle;
    p->x_force = 0;
    p->y_force = 0;
    compute_force_on_particle(root, p);
  }
  if(n->children) {
    int i;
    for(i=0; i<4; i++) {
      serial_compute_force_in_node(&n->children[i]);
    }
  }
}
*/

/* Reset and recompute the force on every particle stored in the subtree
 * rooted at n (each leaf holds at most one particle). */
void compute_force_in_node(node_t *n) {
  if(!n) return;

  if(n->particle) {
    particle_t*p = n->particle;
    p->x_force = 0;
    p->y_force = 0;
    compute_force_on_particle(root, p);
  }
  if(n->children) {
    int i;
    for(i=0; i<4; i++) {;
      compute_force_in_node(&n->children[i]);
    }
  }
}

/* compute the new position/velocity of p over one step and reinsert it into
 * the next step's tree (new_root); updates the global statistics */
void move_particle(particle_t*p, double step, node_t* new_root) {
  p->x_pos += (p->x_vel)*step;
  p->y_pos += (p->y_vel)*step;
  double x_acc = p->x_force/p->mass;
  double y_acc = p->y_force/p->mass;
  p->x_vel += x_acc*step;
  p->y_vel += y_acc*step;

  /* compute statistics */
  double cur_acc = (x_acc*x_acc + y_acc*y_acc);
  cur_acc = sqrt(cur_acc);
  double speed_sq = (p->x_vel)*(p->x_vel) + (p->y_vel)*(p->y_vel);
  double cur_speed = sqrt(speed_sq);

  sum_speed_sq += speed_sq;
  max_acc = MAX(max_acc, cur_acc);
  max_speed = MAX(max_speed, cur_speed);

  p->node = NULL;
  /* a particle leaving the domain is dropped from the simulation */
  if(p->x_pos < new_root->x_min ||
     p->x_pos > new_root->x_max ||
     p->y_pos < new_root->y_min ||
     p->y_pos > new_root->y_max) {
    /* NOTE(review): p points into the `particles` array malloc'd en bloc in
     * main(); calling free() on an interior element is undefined behavior.
     * Marking the particle dead (or swapping it to the array tail) would be
     * safe — confirm and fix. */
    free(p);
    nparticles--;
  } else {
    //omp_set_lock(&lock);
    insert_particle(p, new_root);
    //omp_unset_lock(&lock);
  }
}

/* compute the new position of the particles in a node (recursively moves
 * every particle of the subtree into new_root) */
void move_particles_in_node(node_t*n, double step, node_t *new_root) {
  if(!n) return;

  if(n->particle) {
    particle_t*p = n->particle;
    move_particle(p, step, new_root);
  }
  if(n->children) {
    int i;
    //#pragma omp parallel
    //{
    //#pragma omp single
    for(i=0; i<4; i++) {
      //#pragma omp task
      move_particles_in_node(&n->children[i], step, new_root);
    }
    //}
  }
}

/*
  Move particles one time step.

  Update positions, velocity, and acceleration.
  Return local computations.
*/
void all_move_particles(double step) {
  /* First calculate force for particles. */
  int i=0;
#pragma omp parallel for schedule(dynamic)
  for(i=0;i<nparticles;i++) {
    compute_force_in_node(particles[i].node);
  }

  /* build a fresh tree for the post-move positions, then swap it in */
  node_t* new_root = malloc(sizeof(node_t));
  init_node(new_root, NULL, XMIN, XMAX, YMIN, YMAX);

  /* then move all particles and return statistics */
  move_particles_in_node(root, step, new_root);

  free_node(root);
  free(root);
  root = new_root;
}

/* Main simulation loop: advance time with an adaptive step until T_FINAL or
 * until every particle has left the domain. */
void run_simulation() {
  double t = 0.0, dt = 0.01;

  while (t < T_FINAL && nparticles>0) {
    /* Update time. */
    t += dt;
    /* Move particles with the current and compute rms velocity. */
    all_move_particles(dt);

    /* Adjust dt based on maximum speed and acceleration--this
       simple rule tries to insure that no velocity will change
       by more than 10%

       NOTE(review): if all forces are zero, max_acc stays 0 and this
       divides by zero (dt becomes inf) — verify inputs guarantee
       max_acc > 0 after the first step. */
    dt = 0.1*max_speed/max_acc;

    /* Plot the movement of the particle */
#if DISPLAY
    node_t *n = root;
    clear_display();
    draw_node(n);
    flush_display();
#endif
  }

  //int i=0;
  /*for(;i<nparticles;i++){
    printf("pos = (%lf,%lf)\n",particles[i].x_pos,particles[i].y_pos);
    }*/
}

/* create a quad-tree from an array of particles */
void insert_all_particles(int nparticles, particle_t*particles, node_t*root) {
  int i=0;
  for(i=0; i<nparticles; i++) {
    insert_particle(&particles[i], root);
  }
}

/*
  Simulate the movement of nparticles particles.
*/
int main(int argc, char**argv) {
  if(argc >= 2) {
    nparticles = atoi(argv[1]);
  }
  if(argc == 3) {
    T_FINAL = atof(argv[2]);
  }

  /* NOTE(review): `lock` is not declared in this file and <omp.h> is not
   * included here — presumably both come from nbody_tools.h; verify, since
   * only <pthread.h> is visibly included. */
  omp_init_lock(&lock);
  init();

  /* Allocate global shared arrays for the particles data set. */
  particles = malloc(sizeof(particle_t)*nparticles);
  all_init_particles(nparticles, particles);
  insert_all_particles(nparticles, particles, root);

  /* Initialize thread data structures */
#ifdef DISPLAY
  /* Open an X window to display the particles */
  simple_init (100,100,DISPLAY_SIZE, DISPLAY_SIZE);
#endif

  struct timeval t1, t2;
  gettimeofday(&t1, NULL);

  /* Main thread starts simulation ... */
  run_simulation();

  gettimeofday(&t2, NULL);

  double duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);

#ifdef DUMP_RESULT
  FILE* f_out = fopen("particles.log", "w");
  assert(f_out);
  print_particles(f_out, root);
  fclose(f_out);
#endif

  printf("-----------------------------\n");
  printf("nparticles: %d\n", nparticles);
  printf("T_FINAL: %f\n", T_FINAL);
  printf("-----------------------------\n");
  printf("Simulation took %lf s to complete\n", duration);

#ifdef DISPLAY
  node_t *n = root;
  clear_display();
  draw_node(n);
  flush_display();

  printf("Hit return to close the window.");

  getchar();
  /* Close the X window used to display the particles */
  XCloseDisplay(theDisplay);
#endif

  omp_destroy_lock(&lock);
  return 0;
}
HDF5Dumper_MPI.h
/*
 *  HDF5Dumper_MPI.h
 *  Cubism
 *
 *  Created by Babak Hejazialhosseini on 5/24/09.
 *  Copyright 2009 CSE Lab, ETH Zurich. All rights reserved.
 *
 */
#pragma once

#include <cassert>
#include <cstdio>
#include <iostream>
#include <vector>
#include <string>
#include <sstream>
#include <mpi.h>

#include "HDF5Dumper.h"

CUBISM_NAMESPACE_BEGIN

// The following requirements for the data TStreamer are required:
// TStreamer::NCHANNELS : Number of data elements (1=Scalar, 3=Vector, 9=Tensor)
// TStreamer::operate   : Data access methods for read and write
// TStreamer::getAttributeName : Attribute name of the date ("Scalar", "Vector", "Tensor")

// Collectively dump `grid` into <dpath>/<fname>.h5 (and an .xmf sidecar when
// bXMF): rank 0 first writes the mesh vertex vectors, then all ranks write
// their resident blocks into the global "data" dataset via collective
// MPI-IO hyperslab writes.
// NOTE(review): the iCounter parameter is not referenced in this function.
template<typename TStreamer, typename hdf5Real, typename TGrid>
void DumpHDF5_MPI(const TGrid &grid,
                  const int iCounter,
                  const typename TGrid::Real absTime,
                  const std::string &fname,
                  const std::string &dpath = ".",
                  const bool bXMF = true)
{
#ifdef CUBISM_USE_HDF
    typedef typename TGrid::BlockType B;

    int rank;

    // fname is the base filepath tail without file type extension and
    // additional identifiers
    std::ostringstream filename;
    std::ostringstream fullpath;
    filename << fname;
    fullpath << dpath << "/" << filename.str();

    MPI_Comm comm = grid.getCartComm();
    MPI_Comm_rank(comm, &rank);
    int coords[3];
    grid.peindex(coords);

    herr_t status;
    hid_t file_id, dataset_id, fspace_id, fapl_id, mspace_id;

    ///////////////////////////////////////////////////////////////////////////
    // write mesh (rank 0 only; creates the file and the /vx,/vy,/vz vertex
    // coordinate datasets before the collective phase below)
    std::vector<int> mesh_dims;
    std::vector<std::string> dset_name;
    dset_name.push_back("/vx");
    dset_name.push_back("/vy");
    dset_name.push_back("/vz");

    if (0 == rank)
    {
        H5open();
        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        file_id = H5Fcreate((fullpath.str()+".h5").c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
        status = H5Pclose(fapl_id);

        for (size_t i = 0; i < 3; ++i)
        {
            const MeshMap<B>& m = grid.getMeshMap(i);
            // prefix-sum the cell widths to get vertex coordinates
            std::vector<double> vertices(m.ncells()+1, m.start());
            mesh_dims.push_back(vertices.size());

            for (size_t j = 0; j < m.ncells(); ++j)
                vertices[j+1] = vertices[j] + m.cell_width(j);

            hsize_t dim[1] = {vertices.size()};
            fspace_id = H5Screate_simple(1, dim, NULL);
#ifndef CUBISM_ON_FERMI
            dataset_id = H5Dcreate(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#else
            dataset_id = H5Dcreate2(file_id, dset_name[i].c_str(), H5T_NATIVE_DOUBLE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#endif
            status = H5Dwrite(dataset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, vertices.data());
            status = H5Sclose(fspace_id);
            status = H5Dclose(dataset_id);
        }

        // shutdown h5 file
        status = H5Fclose(file_id);
        H5close();
    }
    MPI_Barrier(comm);

    ///////////////////////////////////////////////////////////////////////////
    // startup file: every rank reopens the file with the MPI-IO driver
    H5open();
    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    status = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
    if(status<0) H5Eprint1(stdout);
    file_id = H5Fopen((fullpath.str()+".h5").c_str(), H5F_ACC_RDWR, fapl_id);
    status = H5Pclose(fapl_id);
    if(status<0) H5Eprint1(stdout);

    ///////////////////////////////////////////////////////////////////////////
    // write data
    // NX/NY/NZ: extent of this rank's resident portion, in grid points
    const unsigned int NX = static_cast<unsigned int>(grid.getResidentBlocksPerDimension(0))*B::sizeX;
    const unsigned int NY = static_cast<unsigned int>(grid.getResidentBlocksPerDimension(1))*B::sizeY;
    const unsigned int NZ = static_cast<unsigned int>(grid.getResidentBlocksPerDimension(2))*B::sizeZ;
    const unsigned int NCHANNELS = TStreamer::NCHANNELS;

    if (rank==0)
    {
        std::cout << "Allocating " << (NX * NY * NZ * NCHANNELS * sizeof(hdf5Real))/(1024.*1024.*1024.) << " GB of HDF5 data";
    }

    hdf5Real * array_all = new hdf5Real[NX * NY * NZ * NCHANNELS];

    std::vector<BlockInfo> vInfo_local = grid.getResidentBlocksInfo();

    // count: local hyperslab extent; dims: global dataset extent (ZYXC order)
    hsize_t count[4] = {NZ, NY, NX, NCHANNELS};
    hsize_t dims[4] = {
        static_cast<unsigned int>(grid.getBlocksPerDimension(2))*B::sizeZ,
        static_cast<unsigned int>(grid.getBlocksPerDimension(1))*B::sizeY,
        static_cast<unsigned int>(grid.getBlocksPerDimension(0))*B::sizeX,
        NCHANNELS};

    if (rank==0)
    {
        std::cout << " (Total " << (dims[0] * dims[1] * dims[2] * dims[3] * sizeof(hdf5Real))/(1024.*1024.*1024.) << " GB)" << std::endl;
    }

    // offset of this rank's hyperslab, from its cartesian coordinates
    hsize_t offset[4] = {
        static_cast<unsigned int>(coords[2]) * NZ,
        static_cast<unsigned int>(coords[1]) * NY,
        static_cast<unsigned int>(coords[0]) * NX,
        0
    };

    // pack the resident blocks into the contiguous staging buffer
#pragma omp parallel for
    for(size_t i=0; i<vInfo_local.size(); i++)
    {
        BlockInfo& info = vInfo_local[i];
        const int idx[3] = {info.index[0], info.index[1], info.index[2]};
        B & b = *(B*)info.ptrBlock;

        for(int iz=0; iz<static_cast<int>(B::sizeZ); iz++)
        {
            const int gz = idx[2]*B::sizeZ + iz;
            for(int iy=0; iy<static_cast<int>(B::sizeY); iy++)
            {
                const int gy = idx[1]*B::sizeY + iy;
                for(int ix=0; ix<static_cast<int>(B::sizeX); ix++)
                {
                    const int gx = idx[0]*B::sizeX + ix;
                    const ptrdiff_t idl = NCHANNELS * (gx + NX * (gy + NY * gz));
                    assert(idl < NX * NY * NZ * NCHANNELS);
                    hdf5Real * const ptr = array_all + idl;
                    hdf5Real output[NCHANNELS];
                    for(unsigned k=0; k<NCHANNELS; ++k) output[k] = 0;
                    TStreamer::operate(b, ix, iy, iz, (hdf5Real*)output);
                    for(unsigned k=0; k<NCHANNELS; ++k) ptr[k] = output[k];
                }
            }
        }
    }

    fapl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(fapl_id, H5FD_MPIO_COLLECTIVE);
    fspace_id = H5Screate_simple(4, dims, NULL);
#ifndef CUBISM_ON_FERMI
    dataset_id = H5Dcreate(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#else
    dataset_id = H5Dcreate2(file_id, "data", get_hdf5_type<hdf5Real>(), fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
#endif

    // NOTE(review): this reassignment drops the dataspace handle created by
    // H5Screate_simple above without an H5Sclose — handle leak; close the
    // creation-time fspace_id before overwriting it.
    fspace_id = H5Dget_space(dataset_id);
    H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, count, NULL);
    mspace_id = H5Screate_simple(4, count, NULL);
    status = H5Dwrite(dataset_id, get_hdf5_type<hdf5Real>(), mspace_id, fspace_id, fapl_id, array_all);
    if (status < 0) H5Eprint1(stdout);

    status = H5Sclose(mspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Sclose(fspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Dclose(dataset_id); if(status<0) H5Eprint1(stdout);
    status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout);
    status = H5Fclose(file_id); if(status<0) H5Eprint1(stdout);
    H5close();

    delete [] array_all;

    // rank 0 writes the Xdmf sidecar describing the rectilinear mesh + data
    if (bXMF && rank==0)
    {
        FILE *xmf = 0;
        xmf = fopen((fullpath.str()+".xmf").c_str(), "w");
        fprintf(xmf, "<?xml version=\"1.0\" ?>\n");
        fprintf(xmf, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n");
        fprintf(xmf, "<Xdmf Version=\"2.0\">\n");
        fprintf(xmf, " <Domain>\n");
        fprintf(xmf, " <Grid GridType=\"Uniform\">\n");
        fprintf(xmf, " <Time Value=\"%e\"/>\n\n", absTime);
        fprintf(xmf, " <Topology TopologyType=\"3DRectMesh\" Dimensions=\"%d %d %d\"/>\n\n", mesh_dims[2], mesh_dims[1], mesh_dims[0]);
        fprintf(xmf, " <Geometry GeometryType=\"VxVyVz\">\n");
        fprintf(xmf, " <DataItem Name=\"mesh_vx\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[0]);
        fprintf(xmf, " %s:/vx\n",(filename.str()+".h5").c_str());
        fprintf(xmf, " </DataItem>\n");
        fprintf(xmf, " <DataItem Name=\"mesh_vy\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[1]);
        fprintf(xmf, " %s:/vy\n",(filename.str()+".h5").c_str());
        fprintf(xmf, " </DataItem>\n");
        fprintf(xmf, " <DataItem Name=\"mesh_vz\" Dimensions=\"%d\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n", mesh_dims[2]);
        fprintf(xmf, " %s:/vz\n",(filename.str()+".h5").c_str());
        fprintf(xmf, " </DataItem>\n");
        fprintf(xmf, " </Geometry>\n\n");
        fprintf(xmf, " <Attribute Name=\"data\" AttributeType=\"%s\" Center=\"Cell\">\n", TStreamer::getAttributeName());
        fprintf(xmf, " <DataItem Dimensions=\"%d %d %d %d\" NumberType=\"Float\" Precision=\"%d\" Format=\"HDF\">\n",(int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)sizeof(hdf5Real));
        fprintf(xmf, " %s:/data\n",(filename.str()+".h5").c_str());
        fprintf(xmf, " </DataItem>\n");
        fprintf(xmf, " </Attribute>\n");
        fprintf(xmf, " </Grid>\n");
        fprintf(xmf, " </Domain>\n");
        fprintf(xmf, "</Xdmf>\n");
        fclose(xmf);
    }
#else
#warning USE OF HDF WAS DISABLED AT COMPILE TIME
#endif
}

// Collectively read this rank's hyperslab of the "data" dataset from
// <dpath>/<fname>.h5 and unpack it into the grid's resident blocks via
// TStreamer::operate.
template<typename TStreamer, typename hdf5Real, typename TGrid>
void ReadHDF5_MPI(TGrid &grid, const std::string& fname, const std::string& dpath=".")
{
#ifdef CUBISM_USE_HDF
    typedef typename TGrid::BlockType B;

    int rank;

    // fname is the base filepath tail without file type extension and
    // additional identifiers
    std::ostringstream filename;
    std::ostringstream fullpath;
    filename << fname;
    fullpath << dpath << "/" << filename.str();

    herr_t status;
    hid_t file_id, dataset_id, fspace_id, fapl_id, mspace_id;

    MPI_Comm comm = grid.getCartComm();
    MPI_Comm_rank(comm, &rank);
    int coords[3];
    grid.peindex(coords);

    const unsigned int NX = static_cast<unsigned int>(grid.getResidentBlocksPerDimension(0))*B::sizeX;
    const unsigned int NY = static_cast<unsigned int>(grid.getResidentBlocksPerDimension(1))*B::sizeY;
    const unsigned int NZ = static_cast<unsigned int>(grid.getResidentBlocksPerDimension(2))*B::sizeZ;
    const unsigned int NCHANNELS = TStreamer::NCHANNELS;

    hdf5Real * array_all = new hdf5Real[NX * NY * NZ * NCHANNELS];

    std::vector<BlockInfo> vInfo_local = grid.getResidentBlocksInfo();

    hsize_t count[4] = {NZ, NY, NX, NCHANNELS};
    hsize_t offset[4] = {
        static_cast<unsigned int>(coords[2]) * NZ,
        static_cast<unsigned int>(coords[1]) * NY,
        static_cast<unsigned int>(coords[0]) * NX,
        0
    };

    H5open();
    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    status = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
    if(status<0) H5Eprint1(stdout);
    file_id = H5Fopen((fullpath.str()+".h5").c_str(), H5F_ACC_RDONLY, fapl_id);
    status = H5Pclose(fapl_id);
    if(status<0) H5Eprint1(stdout);

    dataset_id = H5Dopen2(file_id, "data", H5P_DEFAULT);
    fapl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(fapl_id, H5FD_MPIO_COLLECTIVE);

    // collective read of this rank's hyperslab into the staging buffer
    fspace_id = H5Dget_space(dataset_id);
    H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, count, NULL);
    mspace_id = H5Screate_simple(4, count, NULL);
    status = H5Dread(dataset_id, get_hdf5_type<hdf5Real>(), mspace_id, fspace_id, fapl_id, array_all);
    if (status < 0) H5Eprint1(stdout);

    // scatter the staging buffer back into the resident blocks
#pragma omp parallel for
    for(size_t i=0; i<vInfo_local.size(); i++)
    {
        BlockInfo& info = vInfo_local[i];
        const int idx[3] = {info.index[0], info.index[1], info.index[2]};
        B & b = *(B*)info.ptrBlock;

        for(int iz=0; iz<static_cast<int>(B::sizeZ); iz++)
            for(int iy=0; iy<static_cast<int>(B::sizeY); iy++)
                for(int ix=0; ix<static_cast<int>(B::sizeX); ix++)
                {
                    const int gx = idx[0]*B::sizeX + ix;
                    const int gy = idx[1]*B::sizeY + iy;
                    const int gz = idx[2]*B::sizeZ + iz;

                    hdf5Real * const ptr_input = array_all + NCHANNELS*(gx + NX * (gy + NY * gz));
                    TStreamer::operate(b, ptr_input, ix, iy, iz);
                }
    }

    status = H5Pclose(fapl_id); if(status<0) H5Eprint1(stdout);
    status = H5Dclose(dataset_id); if(status<0) H5Eprint1(stdout);
    status = H5Sclose(fspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Sclose(mspace_id); if(status<0) H5Eprint1(stdout);
    status = H5Fclose(file_id); if(status<0) H5Eprint1(stdout);

    H5close();

    delete [] array_all;
#else
#warning USE OF HDF WAS DISABLED AT COMPILE TIME
#endif
}

CUBISM_NAMESPACE_END
ast-dump-openmp-task.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp task ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-task.c:3:1, line:6:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1> // CHECK-NEXT: `-OMPTaskDirective {{.*}} <line:4:1, col:17> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-NullStmt {{.*}} <col:3> openmp_structured_block // CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-task.c:4:1) *const restrict'
GB_unaryop__abs_fp32_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_fp32_bool
// op(A') function: GB_tran__abs_fp32_bool

// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_fp32_bool
(
    float *Cx,       // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,     // number of entries to process (loop bound below)
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Each iteration reads Ax [p] and writes Cx [p] only, so the aliasing of
    // Cx and Ax noted above is safe, and iterations are independent for the
    // parallel for.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_fp32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is textually included; the macros above
    // (GB_CAST_OP etc.) parameterize it for this type/operator combination.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
templatemath.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ /* * templatemath.h * * Created on: Jan 1, 2016 * Author: agibsonccc */ #ifndef TEMPLATEMATH_H_ #define TEMPLATEMATH_H_ #include <dll.h> #include <pointercast.h> #include <platformmath.h> #include <DataTypeUtils.h> #define BFLOAT16_MAX_VALUE 32737. #define HALF_MAX_VALUE 65504. 
#define FLOAT_MAX_VALUE 3.4028235E38 #define DOUBLE_MAX_VALUE 1.7976931348623157E308 #define FLOAT_MIN_NORMAL 1.17549435e-38 #ifndef M_E #define M_E 2.718281828459 #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif namespace nd4j { #ifdef __CUDACC__ #endif namespace math { template<typename T> math_def inline T nd4j_abs(T value); template<typename T> math_def inline void nd4j_swap(T &val1, T &val2); template<typename T> math_def inline T nd4j_max(T val1, T val2); template<typename T> math_def inline T nd4j_min(T val1, T val2); template <typename T> math_def inline bool nd4j_eq(T val1, T val2, double eps); template<typename T, typename Z> math_def inline Z nd4j_re(T val1, T val2); template<typename T, typename Z> math_def inline Z nd4j_rint(T val1); template<typename T, typename Z> math_def inline Z nd4j_copysign(T val1, T val2); template <typename T, typename Z> math_def inline Z nd4j_softplus(T val); //#ifndef __CUDACC__ template<typename X, typename Y, typename Z> math_def inline Z nd4j_dot(X *x, Y *y, int length); //#endif template<typename T, typename Z> math_def inline Z nd4j_ceil(T val1); template<typename T> math_def inline bool nd4j_isnan(T val1); template<typename T> math_def inline bool nd4j_isinf(T val1); template<typename T> math_def inline bool nd4j_isfin(T val1); template<typename T, typename Z> math_def inline Z nd4j_cos(T val); template<typename T, typename Z> math_def inline Z nd4j_cosh(T val); template<typename X, typename Z> math_def inline Z nd4j_exp(X val); template<typename T, typename Z> math_def inline Z nd4j_floor(T val); template<typename X, typename Z> math_def inline Z nd4j_log(X val); template<typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2); template<typename T, typename Z> math_def inline Z nd4j_round(T val); template<typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X num, Y denom); template<typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X num, Y denom); 
// Gauss error function and its complement (definitions follow the p_* platform
// wrappers further down in this header).
template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);

template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);

// Logistic sigmoid: 1 / (1 + e^(-val)), computed in the output type Z.
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
    return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
}

// ELU activation: identity for val >= 0, alpha * (e^val - 1) otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val, T alpha) {
    if (val >= (T) 0.f)
        return val;
    return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
}

// Leaky ReLU: scales negative inputs by alpha, passes others through.
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val,T alpha) {
    if (val < (T) 0.0f)
        return alpha * val;
    else
        return val;
}

// Derivative of ELU: 1 for val >= 0, alpha * e^val otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val, T alpha) {
    if (val >= static_cast<T>(0.0f))
        return static_cast<Z>(1.0f);
    return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
    //return val >= 0.0 ? 1.0 : nd4j_exp(val);
}

template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);

template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);

// Softplus: log(1 + e^val), a smooth approximation of ReLU.
template<typename T, typename Z>
math_def inline Z nd4j_softplus(T val) {
    return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}

// Softsign: val / (1 + |val|).
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
    return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
}

template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);

template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);

template<typename T, typename Z>
math_def inline Z nd4j_tan(T val);

template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2);

// Two-argument arctangent, delegated to the platform wrapper in Z precision.
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
    return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}

template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
    return p_tan<Z>(static_cast<Z>(tval));
}

// Derivative of tanh: 1 - tanh(val)^2.
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
    Z tanh = nd4j_tanh<T,Z>(val);
    return (Z) 1.0f - tanh * tanh;
}

// Derivative of the sigmoid: s * (1 - s) with s = sigmoid(val).
// NOTE(review): declared to return T although the computation is carried out
// in Z (the sibling nd4j_tanhderivative returns Z) -- looks like a typo, but
// the return type is part of the template interface; confirm before changing.
template <typename T, typename Z>
math_def inline T nd4j_sigmoidderivative(T val) {
    Z sigmoid = nd4j_sigmoid<T,Z>(val);
    return sigmoid * ((Z) 1.0f - sigmoid);
}

// Derivative of softsign: 1 / (1 + |val|)^2.
// NOTE(review): also declared to return T while the result is computed as Z --
// see the note on nd4j_sigmoidderivative above.
template<typename T, typename Z>
math_def inline T nd4j_softsignderivative(T val) {
    T y = (T) 1.0f + nd4j_abs(val);
    return (Z) 1.0f / (y * y);
}

// Sign function mapped onto {-1, 0, +1}.
// NOTE(review): declared to return T but all three result constants are cast
// to Z; presumably the return type was meant to be Z, as in the nd4j_sign /
// nd4j_signum wrappers below.
template<typename T, typename Z>
math_def inline T nd4j_sgn(T val) {
    return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
}

// Aliases for nd4j_sgn.
template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
    return nd4j_sgn<T, Z>(val);
}

template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
    return nd4j_sgn<T, Z>(val);
}

template<typename X, typename Z>
math_def inline Z nd4j_gamma(X a);

template<typename X, typename Z>
math_def inline Z nd4j_lgamma(X x);

//#ifndef __CUDACC__
/*
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
    float16 dot = (float16) 0.0f;

    // TODO: since we can't use simd on unions, we might use something else here.
    for(int e = 0; e < length; e++) {
        dot += x[e] * y[e];
    }

    return dot;
}
*/

// Dot product of two length-`length` arrays, accumulated in the result type Z.
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
    Z dot = (Z)0.0f;

    for(int e = 0; e < length; e++) {
        dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]);
    }

    return dot;
}
//#endif

template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);

template<typename T, typename Z>
math_def inline Z nd4j_sech(T val);

template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);

template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);

template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val);

// Inverse hyperbolic sine: log(sqrt(val^2 + 1) + val).
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
    //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
    return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}

template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);

template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);

//
// nd4j_abs specializations
//

template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
    // native half support: negate the raw half if negative
    if (value < (float16) 0.f) {
        return float16(__hneg(value.data));
    } else
        return value;
#else
    // no native halfs: round-trip through float
    return (float16) fabsf((float) value);
#endif
}

template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
    return (bfloat16) fabsf((float) value);
}

template<>
math_def inline float nd4j_abs<float>(float value) {
    return fabsf(value);
}

template<>
math_def inline double nd4j_abs<double>(double value) {
    return fabs(value);
}

template<>
math_def inline int nd4j_abs<int>(int value) {
    return abs(value);
}

template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
    return llabs(value);
}

// Unsigned and bool inputs cannot be negative, so abs is the identity.
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
    return value;
}

template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
    return value;
}

template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
    return value;
}

template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
    return value;
}

template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
    return value;
}

// NOTE(review): for the most negative int8_t/int16_t value, -value does not
// fit back into the narrow type -- same caveat as the standard abs() family.
template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
    return value < 0 ? -value : value;
}

template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
    return value < 0 ? -value : value;
}

//
// nd4j_isnan specializations
//

// float16 NaN is detected by comparing the raw bit pattern.
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
    return *(value.data.getXP()) == 0x7fffU;
}

template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
    return value == bfloat16::nan(); //0x7fffU;
}

// IEEE NaN is the only value that compares unequal to itself.
template<>
math_def inline bool nd4j_isnan<float>(float value) {
    return value != value;
}

template<>
math_def inline bool nd4j_isnan<double>(double value) {
    return value != value;
}

// Integral and bool types have no NaN representation.
template<>
math_def inline bool nd4j_isnan<int>(int value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
    return false;
}

//
// nd4j_isinf specializations
//

// Half-precision types: a range check against the largest finite value is
// used instead of a bit-pattern check.
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
    return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
    return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}

// Integral types cannot hold infinities.
template<>
math_def inline bool nd4j_isinf<int>(int value) {
    return false;
}
template<> math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) { return false; } template<> math_def inline bool nd4j_isinf<int16_t>(int16_t value) { return false; } template<> math_def inline bool nd4j_isinf<int8_t>(int8_t value) { return false; } template<> math_def inline bool nd4j_isinf<bool>(bool value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) { return false; } template<typename T> math_def inline bool nd4j_isfin(T value) { return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value); } template<> math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) { return (float16) copysignf((float) val1, (float) val2); } template<> math_def inline float nd4j_copysign<float>(float val1, float val2) { return copysignf(val1, val2); } template<> math_def inline double nd4j_copysign<double>(double val1, double val2) { return copysign(val1, val2); } template<> math_def inline int nd4j_copysign<int>(int val1, int val2) { if (val2 < 0) return -(nd4j_abs<int>(val1)); else return nd4j_abs<int>(val1); } template<> math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1)); else return nd4j_abs<Nd4jLong>(val1); } template<> math_def inline bool nd4j_max(bool val1, bool val2) { return (val1 || val2) ? true : false; } template<typename T> math_def inline T nd4j_max(T val1, T val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline bool nd4j_min(bool val1, bool val2) { return (val1 && val2) ? true : false; } template<typename T> math_def inline T nd4j_min(T val1, T val2) { return val1 < val2 ? 
val1 : val2; } template <typename T> math_def inline bool nd4j_eq(T d1, T d2, double eps) { if (nd4j::math::nd4j_isinf<T>(d1) && nd4j::math::nd4j_isinf<T>(d2)) { if (d1 > 0 && d2 > 0) return true; else if (d1 < 0 && d2 < 0) return true; else return false; } auto diff = static_cast<double>(nd4j::math::nd4j_abs<T>(d1 - d2)); // works well except in the range of very large numbers if (diff <= eps) return true; // Knuth approach // works well except in the range of very small numbers if (diff <= nd4j::math::nd4j_max<double>(nd4j::math::nd4j_abs<double>(static_cast<double>(d1)), nd4j::math::nd4j_abs<double>(static_cast<double>(d2))) * eps) return true; return false; } template <typename X, typename Z> math_def inline Z nd4j_ceil(X val) { return static_cast<Z>(p_ceil<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_round(X val) { return static_cast<Z>(p_round<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_asin(X val) { return p_asin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atan(X val) { return p_atan<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atanh(X val) { return p_atanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cosh(X val) { return p_cosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_rint(X val) { return p_rint<X>(val); } template <typename X, typename Z> math_def inline Z nd4j_sinh(X val) { return p_sinh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acos(X val) { return p_acos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_sech(X val) { return static_cast<Z>(1) / nd4j_cosh<X,Z>(val); } template <typename X, typename Z> math_def inline Z nd4j_acosh(X val) { return p_acosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cos(X val) { return 
p_cos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_exp(X val) { return p_exp<X>(val); } template<typename X, typename Z> math_def inline Z nd4j_floor(X val) { return static_cast<Z>(p_floor<X>(val)); } template<typename X, typename Z> math_def inline Z nd4j_log(X val) { return static_cast<Z>(p_log<X>(val)); } /** * This func is special case - it must return floating point value, and optionally Y arg can be floating point argument * @tparam X * @tparam Y * @tparam Z * @param val * @param val2 * @return */ template <typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2) { return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } /** * LogGamma(a) - float point extension of ln(n!) **/ template <typename X, typename Z> math_def inline Z nd4j_lgamma(X x) { // if (x <= X(0.0)) // { // std::stringstream os; // os << "Logarithm of Gamma has sence only for positive values, but " << x << " was given."; // throw std::invalid_argument( os.str() ); // } if (x < X(12.0)) { return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x)); } // Abramowitz and Stegun 6.1.41 // Asymptotic series should be good to at least 11 or 12 figures // For error analysis, see Whittiker and Watson // A Course in Modern Analysis (1927), page 252 static const double c[8] = { 1.0/12.0, -1.0/360.0, 1.0/1260.0, -1.0/1680.0, 1.0/1188.0, -691.0/360360.0, 1.0/156.0, -3617.0/122400.0 }; double z = Z(1.0 / Z(x * x)); double sum = c[7]; for (int i = 6; i >= 0; i--) { sum *= z; sum += c[i]; } double series = sum / Z(x); static const double halfLogTwoPi = 0.91893853320467274178032973640562; return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series); } template<typename T> math_def inline T nd4j_re(T val1, T val2) { if (val1 == (T) 0.0f && val2 == (T) 0.0f) return (T) 0.0f; return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X val, Y 
val2) { return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X val, Y val2) { return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Z> math_def inline Z nd4j_sin(X val) { return p_sin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_sqrt(X val) { return p_sqrt<Z>(static_cast<Z>(val)); } template <typename X> math_def inline X neg_tanh(X val) { X o = static_cast<X>(1.0f); X t = static_cast<X>(2.0f); X e = static_cast<X>(M_E); auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t); return (p - o)/ (p + o); } template <typename X> math_def inline X pos_tanh(X val) { X o = static_cast<X>(1.0f); X t = static_cast<X>(-2.0f); X e = static_cast<X>(M_E); auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t); return (o - p) / (o + p); } template <typename X, typename Z> math_def inline Z nd4j_tanh(X val) { return val <= 0 ? neg_tanh(val) : pos_tanh(val); //return p_tanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erf(X val) { return p_erf<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erfc(X val) { return p_erfc<Z>(static_cast<Z>(val)); } template<typename T> math_def inline void nd4j_swap(T &val1, T &val2) { T temp = val1; val1=val2; val2=temp; }; template <typename X, typename Z> math_def inline Z nd4j_gamma(X a) { // nd4j_lgamma<X,Z>(a); // return (Z)std::tgamma(a); // Split the function domain into three intervals: // (0, 0.001), [0.001, 12), and (12, infinity) /////////////////////////////////////////////////////////////////////////// // First interval: (0, 0.001) // // For small a, 1/Gamma(a) has power series a + gamma a^2 - ... // So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3. // The relative error over this interval is less than 6e-7. 
const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant if (a < X(0.001)) return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a))); /////////////////////////////////////////////////////////////////////////// // Second interval: [0.001, 12) if (a < X(12.0)) { // The algorithm directly approximates gamma over (1,2) and uses // reduction identities to reduce other arguments to this interval. double y = (double)a; int n = 0; bool argWasLessThanOne = y < 1.0; // Add or subtract integers as necessary to bring y into (1,2) // Will correct for this below if (argWasLessThanOne) { y += 1.0; } else { n = static_cast<int>(floor(y)) - 1; // will use n later y -= n; } // numerator coefficients for approximation over the interval (1,2) static const double p[] = { -1.71618513886549492533811E+0, 2.47656508055759199108314E+1, -3.79804256470945635097577E+2, 6.29331155312818442661052E+2, 8.66966202790413211295064E+2, -3.14512729688483675254357E+4, -3.61444134186911729807069E+4, 6.64561438202405440627855E+4 }; // denominator coefficients for approximation over the interval (1,2) static const double q[] = { -3.08402300119738975254353E+1, 3.15350626979604161529144E+2, -1.01515636749021914166146E+3, -3.10777167157231109440444E+3, 2.25381184209801510330112E+4, 4.75584627752788110767815E+3, -1.34659959864969306392456E+5, -1.15132259675553483497211E+5 }; double num = 0.0; double den = 1.0; double z = y - 1; for (auto i = 0; i < 8; i++) { num = (num + p[i]) * z; den = den * z + q[i]; } double result = num / den + 1.0; // Apply correction if argument was not initially in (1,2) if (argWasLessThanOne) { // Use identity gamma(z) = gamma(z+1)/z // The variable "result" now holds gamma of the original y + 1 // Thus we use y-1 to get back the orginal y. result /= (y - 1.0); } else { // Use the identity gamma(z+n) = z*(z+1)* ... 
*(z+n-1)*gamma(z) for (auto i = 0; i < n; i++) result *= y++; } return Z(result); } /////////////////////////////////////////////////////////////////////////// // Third interval: [12, infinity) if (a > 171.624) { // Correct answer too large to display. Force +infinity. return Z(DOUBLE_MAX_VALUE); // return DataTypeUtils::infOrMax<Z>(); } return nd4j::math::nd4j_exp<Z,Z>(nd4j::math::nd4j_lgamma<X,Z>(a)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_igamma(X a, Y x) { Z aim = nd4j_pow<X, X, Z>(x, a) / (nd4j_exp<X, Z>(x) * nd4j_gamma<Y, Z>(a)); auto sum = Z(0.); auto denom = Z(1.); if (a <= X(0.000001)) //throw std::runtime_error("Cannot calculate gamma for a zero val."); return Z(0); for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) { denom *= (a + i); sum += nd4j_pow<X, int, Z>(x, i) / denom; } return aim * sum; } template <typename X, typename Y, typename Z> math_def inline Z nd4j_igammac(X a, Y x) { return Z(1.) - nd4j_igamma<X, Y, Z>(a, x); } #ifdef __CUDACC__ namespace atomics { template <typename T> inline __device__ T nd4j_atomicAdd(T* address, T val); template <typename T> inline __device__ T nd4j_atomicSub(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMul(T* address, T val); template <typename T> inline __device__ T nd4j_atomicDiv(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMin(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMax(T* address, T val); template <> inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) { return atomicMin(address, val); } template <> inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) { return atomicMin(address, val); } template <> inline __device__ float nd4j_atomicMin<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = __float_as_int(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, 
__float_as_int(math::nd4j_min(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ double nd4j_atomicMin<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = __double_as_longlong(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) { #if __CUDA_ARCH__ >= 350 return atomicMin((unsigned long long*)address, (unsigned long long)val); #else unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = __double_as_longlong(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed)); } while (assumed != old); return old; #endif } template <> inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { #if __CUDA_ARCH__ >= 350 return atomicMin((unsigned long long*)address, (unsigned long long)val); #else unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = (unsigned long long)val, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed)); } while (assumed != old); return old; #endif } template <> inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) { int32_t temp = *address; *address = atomicMin(&temp, (int)val); return *address; } template <> inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) { return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data)); } template <> inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) { return 
float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data)); } template <> inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) { return atomicMax(address, val); } template <> inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) { return atomicMax(address, val); } template <> inline __device__ double nd4j_atomicMax<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = __double_as_longlong(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float nd4j_atomicMax<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = __float_as_int(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) { uint32_t temp = *address; *address = atomicMin(&temp, (uint32_t)val); return *address; } template <> inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) { int32_t temp = *address; *address = atomicMin(&temp, (int)val); return *address; } template <> inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) { uint32_t temp = *address; *address = atomicMin(&temp, (uint32_t)val); return *address; } template <> inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) { uint32_t temp = *address; *address = atomicMax(&temp, (uint32_t)val); return *address; } template <> inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) { int32_t temp = *address; *address = 
atomicMax(&temp, (int)val); return *address; } template <> inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) { uint32_t temp = *address; *address = atomicMax(&temp, (uint32_t)val); return *address; } template <> inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) { int32_t temp = *address; *address = atomicMax(&temp, (int32_t)val); return *address; } template <> inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) { auto address_as_ull = (int*) address; long addr = (long) address; bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (address - 1); PAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { float16 res = nd4j_max((float16) old.B.H, val); fresh.B.H = res.data; fresh.B.L = old.B.L; } else { float16 res = nd4j_max((float16) old.B.L, val); fresh.B.L = res.data; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) { auto address_as_ull = (int*) address; long addr = (long)(address); bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (address - 1); BPAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { bfloat16 res = nd4j_max(old.B.H, val); fresh.B.H = res; fresh.B.L = old.B.L; } else { bfloat16 res = nd4j_max(old.B.L, val); fresh.B.L = res; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) { #if __CUDA_ARCH__ >= 350 return atomicMax((unsigned long long*)address, (unsigned long long)val); #else unsigned long long int* address_as_ull = (unsigned 
long long int*)address; unsigned long long int old = __double_as_longlong(val), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed)); } while (assumed != old); return old; #endif } template <> inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; //return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed)); } while (assumed != old); return old; } template <> inline __device__ double nd4j_atomicAdd<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; //return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, val + assumed); } while (assumed != old); return old; } template <> inline __device__ long nd4j_atomicAdd<long>(long* address, long val) { unsigned long long* address_as_ull = (unsigned long long int *) address; // return atomicAdd(address, val); unsigned long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, val + assumed); } while (assumed != old); return old; } template <> inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) { 
    // Tail of nd4j_atomicAdd<uint32_t>: native hardware atomic.
    return atomicAdd(address, val);
}

// Atomic add for uint64_t: delegates to the native unsigned long long
// atomicAdd (the commented-out CAS emulation is kept for reference).
template <>
inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) {
    //        unsigned long long* address_as_ull = (unsigned long long int *) address;
    //
    ////        return atomicAdd(address, val);
    //        unsigned long int old = *address_as_ull, assumed;
    //        do {
    //            assumed = old;
    //            old = atomicCAS(address_as_ull, assumed, val + assumed);
    //        } while (assumed != old);
    //        return old;
    return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}

// Atomic add for float16.
// On sm_70+ with CUDA 10 the native __half atomicAdd is used.
// NOTE(review): that branch performs the add but then falls off the end of a
// non-void function without returning a value — confirm callers ignore the
// result on that path.
// The fallback emulates the add with a 32-bit CAS on the int word containing
// the half, rebuilding the untouched 16-bit lane on every retry.
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
#if __CUDA_ARCH__ >= 700 && defined(CUDA_10)
    atomicAdd(reinterpret_cast<__half*>(address), val.data);
#else
    auto address_as_ull = (int*) address;

    long addr = (long) address;
    bool misaligned = addr & 0x3;
    if (misaligned)
        // Step back 2 bytes to reach the containing 4-byte word
        // (assumes 'address' itself is 2-byte aligned — TODO confirm).
        address_as_ull = (int *) (address - 1);

    PAIR old, assumed, fresh;

    old.W = *address_as_ull;
    do {
        // Update only the 16-bit lane this 'address' refers to; carry the
        // sibling lane through unchanged (PAIR layout declared elsewhere).
        if (!misaligned) {
            float16 res = ((float16) old.B.H) + val;
            fresh.B.H = res.data;
            fresh.B.L = old.B.L;
        } else {
            float16 res = ((float16) old.B.L) + val;
            fresh.B.L = res.data;
            fresh.B.H = old.B.H;
        }

        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);

    // Return the previous value of the targeted lane.
    if (!misaligned) return old.B.H;
    else return old.B.L;
#endif
}

// Atomic add for bfloat16: same 32-bit CAS word-splicing scheme as the
// float16 fallback, using the BPAIR union (declared elsewhere).
template <>
inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) {
    auto address_as_ull = (int*) address;

    auto addr = (long)(address);
    bool misaligned = addr & 0x3;
    if (misaligned)
        address_as_ull = (int *) (address - 1);

    BPAIR old, assumed, fresh;

    old.W = *address_as_ull;
    do {
        if (!misaligned) {
            bfloat16 res = old.B.H + val;
            fresh.B.H = res;
            fresh.B.L = old.B.L;
        } else {
            bfloat16 res = old.B.L + val;
            fresh.B.L = res;
            fresh.B.H = old.B.H;
        }

        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);

    if (!misaligned) return old.B.H;
    else return old.B.L;
}

// Generic 16-bit atomic-add helper; signature continues past this chunk
// boundary.
template <typename T>
static inline __device__ T
// Body of internal_16bit_atomicAdd (its 'template <typename T>' header lies
// just before this chunk): atomically adds 'val' to a 16-bit value by
// CAS-ing the aligned 32-bit word that contains the target halfword.
internal_16bit_atomicAdd(T* address, T val) {
    size_t shift = ((size_t)address & 2);
    int *base_address = (int *)((char*)address - shift);   // aligned 4-byte word

    // Local view of the 32-bit word as two 16-bit lanes of type T.
    union I16PAIR {
        struct {
            T H;
            T L;
        } B;
        int W;

        __host__ __device__
        I16PAIR() {};

        __host__ __device__
        ~I16PAIR() {};
    };

    I16PAIR pairNew, pairOld, pairAssumed;

    // NOTE(review): pairOld.W is never loaded from memory before the first
    // CAS; the loop converges because a successful first CAS implies the
    // guess happened to match memory, and any failed CAS loads the real
    // value into pairOld.W for the retry — verify this is intentional.
    if (reinterpret_cast<int*>(address) == base_address) {
        // 'address' maps to the union's H lane; L is carried unchanged.
        pairOld.B.L = val;
        do {
            pairNew.B.L = pairOld.B.L;
            pairNew.B.H = pairOld.B.H + val;
            pairAssumed.W = pairOld.W;
            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.H;
    } else {
        // 'address' maps to the union's L lane; H is carried unchanged.
        pairOld.B.H = val;
        do {
            pairNew.B.H = pairOld.B.H;
            pairNew.B.L = pairOld.B.L + val;
            pairAssumed.W = pairOld.W;
            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.L;
    }
}

// 16-bit integer adds route through the generic halfword helper above.
template <>
inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) {
    return internal_16bit_atomicAdd<int16_t>(address, val);
}

template <>
inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) {
    return internal_16bit_atomicAdd<uint16_t>(address, val);
}

// NOTE(review): the int8_t/uint8_t "atomic" adds below are NOT atomic with
// respect to the target byte — they read *address non-atomically, atomicAdd
// a thread-local temporary, and write the result back, so concurrent updates
// can be lost. A byte-wise CAS loop (as nd4j_atomicMul<int8_t> uses) would
// be the atomic equivalent — confirm current callers tolerate the race.
template <>
inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) {
    int res = *address;
    atomicAdd(&res, (int)val);
    *address = res;
    return *address;
}

template <>
inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) {
    int res = *address;
    atomicAdd(&res, (int)val);
    *address = res;
    return *address;
}

// NOTE(review): plain '+=' — not atomic; racy under concurrent writers.
template <>
inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) {
    *address += (val);
    return *address;
}

// Subtraction for double is add of the negation.
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
    return nd4j_atomicAdd<double>(address, -val);
}

// Atomic multiply for double via 64-bit CAS on the bit pattern
// (loop body continues past this chunk boundary).
template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Tail of nd4j_atomicMul<double>'s CAS retry loop.
        old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

// Atomic divide for double: multiply by the reciprocal.
// NOTE(review): 1./val computed up front can differ from a true division in
// the last ulp, and val == 0 produces inf — confirm this is acceptable.
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
    return nd4j_atomicMul<double>(address, 1./val);
}

// float add: native hardware atomic.
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
    return atomicAdd(address,val);
}

//template <>
//inline __device__ int nd4j_atomicAdd<int>(int* address, int val) {
//    return atomicAdd(address, val);
//}

// int32_t add: native int atomicAdd with a round-trip cast.
template <>
inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) {
    return (int32_t)atomicAdd((int*)address, (int)val);
}

// Subtraction is add of the negation for float / float16 / bfloat16.
template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
    return nd4j_atomicAdd<float>(address, -val);
}

template <>
inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) {
    return nd4j_atomicAdd<float16>(address, -val);
}

template <>
inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) {
    return nd4j_atomicAdd<bfloat16>(address, -val);
}

// Atomic multiply for float via 32-bit CAS on the bit pattern
// (__float_as_int / __int_as_float round-trip).
template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
    int* address_as_ull = ( int*)address;
    int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}

// Atomic multiply for a single signed byte: CAS the aligned 32-bit word,
// using __byte_perm to extract the target byte and splice the product back
// into the right lane. (Loop tail continues past this chunk boundary.)
template <>
inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) {
    unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
    // Per-offset byte-permutation selectors that splice the new byte into
    // lane 0..3 of the word.
    unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
    unsigned int sel = selectors[(size_t)address & 3];
    unsigned int old, assumed, mul, new_;

    old = *base_address;

    do {
        assumed = old;
        // Extract the target byte (sign behavior via the int8_t cast),
        // multiply, then rebuild the full word.
        mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
        new_ = __byte_perm(old, mul, sel);

        if (new_ == old)
            break;   // value unchanged; skip the CAS

        old = atomicCAS(base_address, assumed, new_);
    } while (assumed !=
        // Tail of nd4j_atomicMul<int8_t>'s CAS retry loop.
        old);

    return (int8_t)old;
}

// Same byte-wise CAS scheme for unsigned char.
template <>
inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) {
    unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
    unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
    unsigned int sel = selectors[(size_t)address & 3];
    unsigned int old, assumed, mul, new_;

    old = *base_address;
    do {
        assumed = old;
        mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
        new_ = __byte_perm(old, mul, sel);

        if (new_ == old)
            break;   // value unchanged; skip the CAS

        old = atomicCAS(base_address, assumed, new_);
    } while (assumed != old);

    return (uint8_t)old;
}

// Generic 16-bit atomic multiply: CAS the aligned 32-bit word containing the
// target halfword (structural mirror of internal_16bit_atomicAdd).
template <typename T>
static inline __device__ T internal_16bit_atomicMul(T* address, T val) {
    size_t shift = ((size_t)address & 2);
    int *base_address = (int *)((char*)address - shift);

    union I16PAIR {
        struct {
            T H;
            T L;
        } B;
        int W;

        __host__ __device__
        I16PAIR() {};

        __host__ __device__
        ~I16PAIR() {};
    };

    I16PAIR pairNew, pairOld, pairAssumed;

    if (reinterpret_cast<int*>(address) == base_address) {
        // 'address' maps to the union's H lane; L is carried unchanged.
        pairOld.B.L = val;
        do {
            pairNew.B.L = pairOld.B.L;
            pairNew.B.H = pairOld.B.H * val;
            pairAssumed.W = pairOld.W;
            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.H;
    } else {
        // 'address' maps to the union's L lane; H is carried unchanged.
        pairOld.B.H = val;
        do {
            pairNew.B.H = pairOld.B.H;
            pairNew.B.L = pairOld.B.L * val;
            pairAssumed.W = pairOld.W;
            pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
        } while (pairAssumed.W != pairOld.W);

        return (T) pairOld.B.L;
    }
}

// 16-bit integer multiplies route through the generic halfword helper.
template <>
inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) {
    return internal_16bit_atomicMul<int16_t>(address, val);
}

template <>
inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) {
    return internal_16bit_atomicMul<uint16_t>(address, val);
}

// Atomic multiply for int via CAS loop
// (loop tail continues past this chunk boundary).
template <>
inline __device__ int nd4j_atomicMul<int>(int* address, int val) {
    int* res_address = address;
    int old = *res_address, assumed;
    do {
        assumed = old;
        old =
        // Tail of nd4j_atomicMul<int>'s CAS retry loop.
        atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return old;
}

// Atomic multiply for unsigned int via CAS loop.
template <>
inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) {
    unsigned int* res_address = address;
    unsigned int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return old;
}

// 64-bit multiplies reuse the unsigned long long CAS (two's-complement
// wraparound makes the signed result bit-identical).
template <>
inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) {
    unsigned long long int* res_address = (unsigned long long int*)address;
    unsigned long long int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return (int64_t)old;
}

template <>
inline __device__ uint64_t nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) {
    unsigned long long int* res_address = (unsigned long long int*)address;
    unsigned long long int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return (uint64_t)old;
}

// Guarded out on Windows — presumably Nd4jLong aliases one of the 64-bit
// types there and a second specialization would be a duplicate; confirm.
#if !defined(_WIN32) && !defined(_WIN64)
template <>
inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
    unsigned long long int* res_address = (unsigned long long*)address;
    unsigned long long int old = *res_address, assumed;
    do {
        assumed = old;
        old = atomicCAS(res_address, assumed, val * assumed);
    } while (assumed != old);
    return (Nd4jLong)old;
}
#endif

// 16-bit float multiplies route through the generic halfword helper.
template <>
inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) {
    return internal_16bit_atomicMul<bfloat16>(address, val);
}

template <>
inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) {
    return internal_16bit_atomicMul<float16>(address, val);
}

// Division implemented as multiplication by the reciprocal.
template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
    return nd4j_atomicMul<float>(address, 1.f / val);
}

// (Signature continues past this chunk boundary.)
template <>
inline __device__ float16 nd4j_atomicDiv<float16>(float16*
// Tail of nd4j_atomicDiv<float16>: multiply by the float16 reciprocal.
address, float16 val) {
    return internal_16bit_atomicMul<float16>(address, (float16) 1.f / val);
}

template <>
inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) {
    return internal_16bit_atomicMul<bfloat16>(address, (bfloat16) 1 / val);
}

// Closers for scopes opened before this chunk (CUDA guard + enclosing
// namespaces, per the nd4j::math:: qualifiers used below).
}
#endif
}
}

#ifdef _OPENMP

#ifndef MAX_FLOAT
#define MAX_FLOAT 1e37
#endif

// OpenMP user-defined reductions over the scalar types used by the kernels.
// maxTF / minTF: floating-point max/min, seeded with -/+MAX_FLOAT so any
// real value replaces the identity.
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
                omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
                initializer (omp_priv=-MAX_FLOAT)

#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
                omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
                initializer (omp_priv=MAX_FLOAT)

// maxT / minT: generic max/min over all listed numeric types.
// NOTE(review): initializer omp_priv=0 makes maxT floor at 0 for
// all-negative inputs and pins minT at 0 for all-positive inputs — confirm
// callers only reduce domains where 0 is a valid identity.
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
                initializer (omp_priv=0)

#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
                initializer (omp_priv=0)

// amaxT / aminT: max/min of absolute values (no explicit initializer, so
// omp_priv is default-initialized per the OpenMP rules).
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )

#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )

// asumT: sum of absolute values; sumT: plain sum. Both start from 0.
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = nd4j::math::nd4j_abs(omp_in) + nd4j::math::nd4j_abs(omp_out))\
                initializer (omp_priv=0)

#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
                omp_out = omp_in + omp_out)\
                initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = omp_in * omp_out)\ initializer (omp_priv=1) #endif #endif /* TEMPLATEMATH_H_ */