source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_emult_phase0.c
//------------------------------------------------------------------------------ // GB_emult_phase0: find vectors of C to compute for C=A.*B or C<M>=A.*B //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // The eWise multiply of two matrices, C=A.*B, C<M>=A.*B, or C<!M>=A.*B starts // with this phase, which determines which vectors of C need to be computed. // On input, A and B are the two matrices being ewise multiplied, and M is the // optional mask matrix. If present, it is not complemented. // The M, A, and B matrices are sparse or hypersparse (not a slice or // hyperslice). C will be standard (if Ch is returned NULL) or hypersparse // (if Ch is returned non-NULL). // Ch: the vectors to compute in C. Not allocated, but equal to either // A->h, B->h, or M->h, or NULL if C is not hypersparse. // C_to_A: if A is hypersparse, and Ch is not A->h, then C_to_A [k] = kA // if the kth vector j = Ch [k] is equal to Ah [kA]. If j does not appear // in A, then C_to_A [k] = -1. Otherwise, C_to_A is returned as NULL. // C is always hypersparse in this case. // C_to_B: if B is hypersparse, and Ch is not B->h, then C_to_B [k] = kB // if the kth vector j = Ch [k] is equal to Bh [kB]. If j does not appear // in B, then C_to_B [k] = -1. Otherwise, C_to_B is returned as NULL. // C is always hypersparse in this case. // C_to_M: if M is hypersparse, and Ch is not M->h, then C_to_M [k] = kM // if the kth vector j = (Ch == NULL) ? k : Ch [k] is equal to Mh [kM]. // If j does not appear in M, then C_to_M [k] = -1. Otherwise, C_to_M is // returned as NULL. C is always hypersparse in this case. 
// FUTURE:: exploit A==M, B==M, and A==B aliases #include "GB_emult.h" GrB_Info GB_emult_phase0 // find vectors in C for C=A.*B or C<M>=A.*B ( int64_t *p_Cnvec, // # of vectors to compute in C const int64_t *restrict *Ch_handle, // Ch is M->h, A->h, B->h, or NULL int64_t *restrict *C_to_M_handle, // C_to_M: size Cnvec, or NULL int64_t *restrict *C_to_A_handle, // C_to_A: size Cnvec, or NULL int64_t *restrict *C_to_B_handle, // C_to_B: size Cnvec, or NULL // original input: const GrB_Matrix M, // optional mask, may be NULL const GrB_Matrix A, const GrB_Matrix B, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (p_Cnvec != NULL) ; ASSERT (Ch_handle != NULL) ; ASSERT (C_to_A_handle != NULL) ; ASSERT (C_to_B_handle != NULL) ; ASSERT_OK (GB_check (A, "A for emult phase0", GB0)) ; ASSERT_OK (GB_check (B, "B for emult phase0", GB0)) ; ASSERT_OK_OR_NULL (GB_check (M, "M for emult phase0", GB0)) ; ASSERT (A->vdim == B->vdim) ; ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ; //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- const int64_t *restrict Ch = NULL ; int64_t *restrict C_to_M = NULL ; int64_t *restrict C_to_A = NULL ; int64_t *restrict C_to_B = NULL ; (*Ch_handle ) = NULL ; if (C_to_M_handle != NULL) { (*C_to_M_handle) = NULL ; } (*C_to_A_handle) = NULL ; (*C_to_B_handle) = NULL ; //-------------------------------------------------------------------------- // get content of M, A, and B //-------------------------------------------------------------------------- int64_t n = A->vdim ; int64_t Anvec = A->nvec ; const int64_t *restrict Ah = A->h ; bool A_is_hyper = A->is_hyper ; ASSERT (!A->is_slice) ; int64_t Bnvec = B->nvec ; const int64_t *restrict Bh = B->h ; bool B_is_hyper = B->is_hyper ; 
ASSERT (!B->is_slice) ; int64_t Mnvec = 0 ; const int64_t *restrict Mh = NULL ; bool M_is_hyper = false ; if (M != NULL) { Mnvec = M->nvec ; Mh = M->h ; M_is_hyper = M->is_hyper ; ASSERT (!M->is_slice) ; } //-------------------------------------------------------------------------- // determine how to construct the vectors of C //-------------------------------------------------------------------------- if (M != NULL) { //---------------------------------------------------------------------- // 8 cases to consider: A, B, M can each be hyper or standard //---------------------------------------------------------------------- // Mask is present and not complemented if (A_is_hyper) { if (B_is_hyper) { if (M_is_hyper) { //---------------------------------------------------------- // (1) A hyper, B hyper, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Bh, Ah int64_t nvec = GB_IMIN (Anvec, Bnvec) ; nvec = GB_IMIN (nvec, Mnvec) ; if (nvec == Anvec) { Ch = Ah ; } else if (nvec == Bnvec) { Ch = Bh ; } else // (nvec == Mnvec) { Ch = Mh ; } } else { //---------------------------------------------------------- // (2) A hyper, B hyper, M standard: C hyper //---------------------------------------------------------- // Ch = smaller of Ah, Bh if (Anvec <= Bnvec) { Ch = Ah ; } else { Ch = Bh ; } } } else { if (M_is_hyper) { //---------------------------------------------------------- // (3) A hyper, B standard, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Ah if (Anvec <= Mnvec) { Ch = Ah ; } else { Ch = Mh ; } } else { //---------------------------------------------------------- // (4) A hyper, B standard, M standard: C hyper //---------------------------------------------------------- Ch = Ah ; } } } else { if (B_is_hyper) { if (M_is_hyper) { //---------------------------------------------------------- // (5) A standard, B hyper, M hyper: C hyper 
//---------------------------------------------------------- // Ch = smaller of Mh, Bh if (Bnvec <= Mnvec) { Ch = Bh ; } else { Ch = Mh ; } } else { //---------------------------------------------------------- // (6) A standard, B hyper, M standard: C hyper //---------------------------------------------------------- Ch = Bh ; } } else { if (M_is_hyper) { //---------------------------------------------------------- // (7) A standard, B standard, M hyper: C hyper //---------------------------------------------------------- Ch = Mh ; } else { //---------------------------------------------------------- // (8) A standard, B standard, M standard: C standard //---------------------------------------------------------- ; } } } } else { //---------------------------------------------------------------------- // 4 cases to consider: A, B can be hyper or standard //---------------------------------------------------------------------- // Mask is not present, or present and complemented. if (A_is_hyper) { if (B_is_hyper) { //-------------------------------------------------------------- // (1) A hyper, B hyper: C hyper //-------------------------------------------------------------- // Ch = smaller of Ah, Bh if (Anvec <= Bnvec) { Ch = Ah ; } else { Ch = Bh ; } } else { //-------------------------------------------------------------- // (2) A hyper, B standard: C hyper //-------------------------------------------------------------- Ch = Ah ; } } else { if (B_is_hyper) { //-------------------------------------------------------------- // (3) A standard, B hyper: C hyper //-------------------------------------------------------------- Ch = Bh ; } else { //-------------------------------------------------------------- // (4) A standard, B standard: C standard //-------------------------------------------------------------- ; } } } //-------------------------------------------------------------------------- // find Cnvec 
//-------------------------------------------------------------------------- int64_t Cnvec ; if (Ch == NULL) { // C is standard Cnvec = n ; } else if (Ch == Ah) { Cnvec = Anvec ; } else if (Ch == Bh) { Cnvec = Bnvec ; } else // (Ch == Mh) { Cnvec = Mnvec ; } //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // construct C_to_M mapping //-------------------------------------------------------------------------- if (M_is_hyper && Ch != Mh) { // allocate C_to_M GB_MALLOC_MEMORY (C_to_M, Cnvec, sizeof (int64_t)) ; if (C_to_M == NULL) { // out of memory return (GB_OUT_OF_MEMORY) ; } // compute C_to_M ASSERT (Ch != NULL) ; const int64_t *restrict Mp = M->p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < Cnvec ; k++) { int64_t pM, pM_end, kM = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Mh, Mp, &kM, Mnvec-1, j, &pM, &pM_end) ; C_to_M [k] = (pM < pM_end) ? kM : -1 ; } } //-------------------------------------------------------------------------- // construct C_to_A mapping //-------------------------------------------------------------------------- if (A_is_hyper && Ch != Ah) { // allocate C_to_A GB_MALLOC_MEMORY (C_to_A, Cnvec, sizeof (int64_t)) ; if (C_to_A == NULL) { // out of memory GB_FREE_MEMORY (C_to_M, Cnvec, sizeof (int64_t)) ; return (GB_OUT_OF_MEMORY) ; } // compute C_to_A ASSERT (Ch != NULL) ; const int64_t *restrict Ap = A->p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < Cnvec ; k++) { int64_t pA, pA_end, kA = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Ah, Ap, &kA, Anvec-1, j, &pA, &pA_end) ; C_to_A [k] = (pA < pA_end) ? 
kA : -1 ; } } //-------------------------------------------------------------------------- // construct C_to_B mapping //-------------------------------------------------------------------------- if (B_is_hyper && Ch != Bh) { // allocate C_to_B GB_MALLOC_MEMORY (C_to_B, Cnvec, sizeof (int64_t)) ; if (C_to_B == NULL) { // out of memory GB_FREE_MEMORY (C_to_M, Cnvec, sizeof (int64_t)) ; GB_FREE_MEMORY (C_to_A, Cnvec, sizeof (int64_t)) ; return (GB_OUT_OF_MEMORY) ; } // compute C_to_B ASSERT (Ch != NULL) ; const int64_t *restrict Bp = B->p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < Cnvec ; k++) { int64_t pB, pB_end, kB = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Bh, Bp, &kB, Bnvec-1, j, &pB, &pB_end) ; C_to_B [k] = (pB < pB_end) ? kB : -1 ; } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- (*p_Cnvec ) = Cnvec ; (*Ch_handle ) = Ch ; if (C_to_M_handle != NULL) { (*C_to_M_handle) = C_to_M ; } (*C_to_A_handle) = C_to_A ; (*C_to_B_handle) = C_to_B ; //-------------------------------------------------------------------------- // The code below describes what the output contains: //-------------------------------------------------------------------------- #ifdef GB_DEBUG ASSERT (A != NULL) ; // A and B are always present ASSERT (B != NULL) ; int64_t jlast = -1 ; for (int64_t k = 0 ; k < Cnvec ; k++) { // C(:,j) is in the list, as the kth vector int64_t j ; if (Ch == NULL) { // C will be constructed as standard sparse j = k ; } else { // C will be constructed as hypersparse j = Ch [k] ; } // vectors j in Ch are sorted, and in the range 0:n-1 ASSERT (j >= 0 && j < n) ; ASSERT (j > jlast) ; jlast = j ; // see if A (:,j) exists if (C_to_A != NULL) { // A is hypersparse ASSERT (A->is_hyper) int64_t kA = C_to_A [k] ; ASSERT (kA >= -1 && kA < A->nvec) ; if (kA >= 0) { int64_t jA = A->h [kA] ; ASSERT (j == jA) ; 
} } else if (A->is_hyper) { // A is hypersparse, and Ch is a shallow copy of A->h ASSERT (Ch == A->h) ; } // see if B (:,j) exists if (C_to_B != NULL) { // B is hypersparse ASSERT (B->is_hyper) int64_t kB = C_to_B [k] ; ASSERT (kB >= -1 && kB < B->nvec) ; if (kB >= 0) { int64_t jB = B->h [kB] ; ASSERT (j == jB) ; } } else if (B->is_hyper) { // A is hypersparse, and Ch is a shallow copy of A->h ASSERT (Ch == B->h) ; } // see if M (:,j) exists if (Ch != NULL && M != NULL && Ch == M->h) { // Ch is the same as Mh ASSERT (M != NULL) ; ASSERT (M->is_hyper) ; ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ; ASSERT (C_to_M == NULL) ; } else if (C_to_M != NULL) { // M is present and hypersparse ASSERT (M != NULL) ; ASSERT (M->is_hyper) ; int64_t kM = C_to_M [k] ; ASSERT (kM >= -1 && kM < M->nvec) ; if (kM >= 0) { int64_t jM = M->h [kM] ; ASSERT (j == jM) ; } } else { // M is not present, or in standard form ASSERT (M == NULL || !(M->is_hyper)) ; } } #endif return (GrB_SUCCESS) ; }
Shatter.h
#pragma once #ifndef SHATTER_H #define SHATTER_H #define NO_DEFINES #include "d3d9.h" #include "Objects\SuperSectors.h" #include "Collision.h" struct RenderableVertex : Vertex { Vertex n; BYTE color[4]; float uv[2]; RenderableVertex(float _x, float _y, float _z) : Vertex(_x, _y, _z) { } }; //#define p_stride 0x24//*(DWORD*)0x0090B8A8 static const float DEFAULT_AREA_TEST = 288.0f; static const float DEFAULT_VELOCITY_VARIANCE = 0.0f; static const float DEFAULT_SPREAD_FACTOR = 1.0f; static const float DEFAULT_LIFETIME = 4.0f; static const float DEFAULT_BOUNCE = -10000.0f; static const float DEFAULT_BOUNCE_AMPLITUDE = 0.8f; struct sTriSubdivideStack { static const int TRI_SUBDIVIDE_STACK_SIZE = 16 * 1024 * 4; void Reset(void); void Clear(void); bool IsEmpty(void) { return m_offset == 0; } void SetBlockSize(int size) { m_block_size = size; } int GetBlockSize(void) { return m_block_size; } void Pop(void* p_data); void Push(void* p_data); const void* Peek(DWORD index); private: int m_offset; int m_block_size; char m_data[TRI_SUBDIVIDE_STACK_SIZE]; }; Vertex shatterVelocity; float shatterAreaTest = DEFAULT_AREA_TEST * DEFAULT_AREA_TEST; float shatterVelocityVariance = DEFAULT_VELOCITY_VARIANCE; float shatterSpreadFactor = DEFAULT_SPREAD_FACTOR; float shatterLifetime = DEFAULT_LIFETIME; float shatterBounce = DEFAULT_BOUNCE; float shatterBounceAmplitude = DEFAULT_BOUNCE_AMPLITUDE; sTriSubdivideStack triSubdivideStack; float shatterAreas[3 * 1024]; struct ShatterData { Mesh::MaterialSplit* split; SuperSector* sector; BYTE* verts; D3DXVECTOR3* pos; D3DXVECTOR3* vel; Matrix* matrices; //BYTE* tris; int numTris; D3DXVECTOR3 shatter_point; DWORD numShatteredTris; int* shatteredTris; float life; float gravity; float bounce; float bounceAmp; D3DXVECTOR3* old_pos; bool* collided; Collision::CollData data; Collision::CollCache cache; std::vector<RenderableVertex> shattered_pieces; ~ShatterData() { debug_print("Deleting ShatterObject %s\n", FindChecksumName(sector->name)); delete[] 
verts; delete[]pos; delete[]vel; delete[]matrices; delete[]old_pos; delete[]collided; //delete[]tris; ZeroMemory(this, sizeof(ShatterData)); } void Render() { //printf("Split %p Material %X\n", split, split->material); if (split->material && split->material->texture) { //debug_print("Going to submit material\n"); split->material->Submit(); //debug_print("VertexShader %X stride %X\n", split->vertexShader, split->stride); Gfx::pDevice->SetFVF(split->vertexShader); /*debug_print("VBuffer %p IBuffer %p stride %X primCount %d vertCount %d\n", split->vertexBuffer->GetProxyInterface(), split->indexBuffer->GetProxyInterface(), split->stride, split->numIndices, split->numVertices); Gfx::pDevice->SetStreamSource(0, split->vertexBuffer->GetProxyInterface(), 0, split->stride); Gfx::pDevice->SetIndices(split->indexBuffer->GetProxyInterface()); Gfx::pDevice->DrawIndexedPrimitive(D3DPT_TRIANGLESTRIP, split->baseIndex, 0, split->numVertices, 0, split->numIndices);*/ //debug_print("Going to Draw\n"); Gfx::pDevice->DrawPrimitiveUP(D3DPT_TRIANGLELIST, numTris, verts, split->stride); /*if(shattered_pieces.size()) Gfx::pDevice->DrawPrimitiveUP(D3DPT_TRIANGLELIST, shattered_pieces.size() / 3, &shattered_pieces.front(), split->stride);*/ //debug_print("Finished rendering, sucessfully :)\n"); } } void Update(float framelength) { //debug_print("NewShatter: Going to update %s (delta:%f)\n", FindChecksumName(sector->name), framelength); BYTE* p_vert_data = verts; DWORD stride = split->stride; // Load up initial three vertex pointers. D3DXVECTOR3* p_v0 = (D3DXVECTOR3*)(p_vert_data); D3DXVECTOR3* p_v1 = (D3DXVECTOR3*)(p_vert_data + stride); D3DXVECTOR3* p_v2 = (D3DXVECTOR3*)(p_vert_data + (2 * stride)); //for some reason this breaks the code... 
//#pragma omp parallel for for (int i = 0; i < numTris; ++i) { if (!collided[i]) { // To move the shatter pieces: // 1) subtract position from each vertex // 2) rotate // 3) update position with velocity // 4) add new position to each vertex // The matrix holds 3 vectors at once. /*D3DXVECTOR3* p_v0 = (D3DXVECTOR3*)(((BYTE*)base_p_v0) + ((stride * 3) * shatteredTris[i])); D3DXVECTOR3* p_v1 = (D3DXVECTOR3*)(((BYTE*)base_p_v1) + ((stride * 3) * shatteredTris[i])); D3DXVECTOR3* p_v2 = (D3DXVECTOR3*)(((BYTE*)base_p_v2) + ((stride * 3) * shatteredTris[i])); if (true) {*/ Matrix m; m[X].Set(p_v0->x - pos[i][X], p_v0->y - pos[i][Y], p_v0->z - pos[i][Z]); m[Y].Set(p_v1->x - pos[i][X], p_v1->y - pos[i][Y], p_v1->z - pos[i][Z]); m[Z].Set(p_v2->x - pos[i][X], p_v2->y - pos[i][Y], p_v2->z - pos[i][Z]); m[X].Rotate(matrices[i]); m[Y].Rotate(matrices[i]); m[Z].Rotate(matrices[i]); // Update the position and velocity of the shatter piece, dealing with bouncing if necessary. if (!UpdateParameters(i, framelength, p_v0, p_v1, p_v2)) { *p_v0 = *(D3DXVECTOR3*)&m[X];// +pos[i]; *p_v1 = *(D3DXVECTOR3*)&m[Y];// +pos[i]; *p_v2 = *(D3DXVECTOR3*)&m[Z];// +pos[i]; Vertex normal = CalculateNormal(p_v0, p_v1, p_v2); printf("normal %f %f %f data %f %f %f\n", normal.x, normal.y, normal.z, data.normal.x, data.normal.y, data.normal.z); float angle = fabsf(data.normal.y - normal.y); if (angle > 0.15f && i % 5 == 0) { RenderableVertex v00 = RenderableVertex(p_v0->x, p_v0->y, p_v0->z); RenderableVertex v01(p_v1->x, p_v1->y, p_v1->z); RenderableVertex v02(p_v2->x, p_v2->y, p_v2->z); RenderableVertex v0(p_v0->x + ((p_v1->x - p_v0->x) * 0.5f), p_v0->y + ((p_v1->y - p_v0->y) * 0.5f), p_v0->z + ((p_v1->z - p_v0->z) * 0.5f)); RenderableVertex v1(p_v1->x + ((p_v2->x - p_v1->x) * 0.5f), p_v1->y + ((p_v2->y - p_v1->y) * 0.5f), p_v1->z + ((p_v2->z - p_v1->z) * 0.5f)); RenderableVertex v2(p_v2->x + ((p_v0->x - p_v2->x) * 0.5f), p_v2->y + ((p_v0->y - p_v2->y) * 0.5f), p_v2->z + ((p_v0->z - p_v2->z) * 0.5f)); 
DWORD uv_offset = 12; //Need to learn how to check if have normals... if (sector->color_offset) { uv_offset += 4; DWORD color_offset = 12; BYTE* p_v0col = (BYTE*)(p_v0 + color_offset); BYTE* p_v1col = (BYTE*)(p_v1 + color_offset); BYTE* p_v2col = (BYTE*)(p_v2 + color_offset); for (int i = 0; i < 4; ++i) { v0.color[i] = p_v0col[i] + (((int)p_v1col[i] - (int)p_v0col[i]) / 2); v1.color[i] = p_v1col[i] + (((int)p_v2col[i] - (int)p_v1col[i]) / 2); v2.color[i] = p_v2col[i] + (((int)p_v0col[i] - (int)p_v2col[i]) / 2); } } // Deal with uv0 (not always present). if (sector->uv_offset) { float* p_v0uv = (float*)(p_v0 + uv_offset); float* p_v1uv = (float*)(p_v1 + uv_offset); float* p_v2uv = (float*)(p_v2 + uv_offset); for (int i = 0; i < 2; ++i) { v0.uv[i] = p_v0uv[i] + ((p_v1uv[i] - p_v0uv[i]) * 0.5f); v1.uv[i] = p_v1uv[i] + ((p_v2uv[i] - p_v1uv[i]) * 0.5f); v2.uv[i] = p_v2uv[i] + ((p_v0uv[i] - p_v2uv[i]) * 0.5f); } } // Push the four new tris onto the stack. v00 += pos[i]; v01 += pos[i]; v02 += pos[i]; v0 += pos[i]; v1 += pos[i]; v2 += pos[i]; shattered_pieces.push_back(v00); shattered_pieces.push_back(v0); shattered_pieces.push_back(v2); shattered_pieces.push_back(v0); shattered_pieces.push_back(v01); shattered_pieces.push_back(v1); shattered_pieces.push_back(v0); shattered_pieces.push_back(v1); shattered_pieces.push_back(v2); shattered_pieces.push_back(v2); shattered_pieces.push_back(v1); shattered_pieces.push_back(v02); *p_v0 = Vertex(0, 0, 0); *p_v1 = Vertex(0, 0, 0); *p_v2 = Vertex(0, 0, 0); } else { *p_v0 += pos[i]; *p_v1 += pos[i]; *p_v2 += pos[i]; } } else { *(D3DXVECTOR3*)&m[X] += pos[i]; *(D3DXVECTOR3*)&m[Y] += pos[i]; *(D3DXVECTOR3*)&m[Z] += pos[i]; Vertex temp0, temp1, temp2; p_v0->x = m[X][X]; p_v0->y = m[X][Y]; p_v0->z = m[X][Z]; p_v1->x = m[Y][X]; p_v1->y = m[Y][Y]; p_v1->z = m[Y][Z]; p_v2->x = m[Z][X]; p_v2->y = m[Z][Y]; p_v2->z = m[Z][Z]; } } p_v0 = (D3DXVECTOR3*)(((BYTE*)p_v0) + (stride * 3)); p_v1 = (D3DXVECTOR3*)(((BYTE*)p_v1) + (stride * 3)); p_v2 = 
(D3DXVECTOR3*)(((BYTE*)p_v2) + (stride * 3)); //} } // Also process normals if they exist. /*if (sector->normals) { p_v0 = sector->normals; p_v1 = sector->normals + 1; p_v2 = sector->normals + 2; for (int i = 0; i < numTris; ++i) { // The matrix holds 3 vectors at once. Matrix m; m[X].Set(p_v0->x, p_v0->y, p_v0->z); m[Y].Set(p_v1->x, p_v1->y, p_v1->z); m[Z].Set(p_v2->x, p_v2->y, p_v2->z); m[X].Rotate(matrices[i]); m[Y].Rotate(matrices[i]); m[Z].Rotate(matrices[i]); p_v0->x = m[X][X]; p_v0->y = m[X][Y]; p_v0->z = m[X][Z]; p_v1->x = m[Y][X]; p_v1->y = m[Y][Y]; p_v1->z = m[Y][Z]; p_v2->x = m[Z][X]; p_v2->y = m[Z][Y]; p_v2->z = m[Z][Z]; p_v0 = (D3DXVECTOR3*)(((BYTE*)p_v0) + (stride * 3)); p_v1 = (D3DXVECTOR3*)(((BYTE*)p_v1) + (stride * 3)); p_v2 = (D3DXVECTOR3*)(((BYTE*)p_v2) + (stride * 3)); } }*/ //debug_print("Finished updating, now going to render..\n"); } ShatterData(SuperSector* _sector, Mesh::MaterialSplit* _split, int numTriangles) { /*pos = new D3DXVECTOR3[numTriangles]; vel = new D3DXVECTOR3[numTriangles]; matrices = new Matrix[numTriangles]; numTris = 0; tris = new BYTE[numTriangles * 3]; */ pos = NULL; vel = NULL; matrices = NULL; verts = new BYTE[numTriangles * 3 * _split->stride]; numTris = 0; sector = _sector; split = _split; gravity = Gfx::shatter_gravity; life = shatterLifetime * Gfx::shatter_life_factor; bounce = shatterBounce; bounceAmp = shatterBounceAmplitude; BBox bbox = *sector->GetBBox(); RwLine line; line.start = bbox.min; line.end = bbox.max; line.end.y -= 4500.f; line.start.x -= 50.0f; line.start.z -= 50.0f; line.end.x += 50.0f; line.end.z += 50.0f; cache.Update(line); data = Collision::CollData(&cache); debug_print("Allocated memory\n"); } void Allocate() { pos = new D3DXVECTOR3[numTris]; vel = new D3DXVECTOR3[numTris]; matrices = new Matrix[numTris]; old_pos = new D3DXVECTOR3[numTris]; collided = new bool[numTris]; ZeroMemory(collided, numTris); if (pos && vel && matrices) debug_print("Memory alllocated successfully\n"); else 
debug_print("Memory did not allocate...\n"); } bool UpdateParameters(int index, float timestep, const D3DXVECTOR3* const __restrict v0, const D3DXVECTOR3* const __restrict v1, const D3DXVECTOR3* const __restrict v2) { old_pos[index] = pos[index]; pos[index] += vel[index] * timestep; if ((pos[index][Y] < bounce) && (vel[index][Y] < 0.0f)) { // Hit the floor. Bounce back up. pos[index][Y] = bounce + (bounce - pos[index][Y]); pos[index][Y] = vel[index][Y] * -bounceAmp; // And figure a new rotation matrix. Vertex axis(-1.0f + (2.0f * (float)rand() / RAND_MAX), -1.0f + (2.0f * (float)rand() / RAND_MAX), -1.0f + (2.0f * (float)rand() / RAND_MAX)); axis.Normalize(); matrices[index].Ident(); matrices[index].Rotate(axis, 0.1f * ((float)rand() / RAND_MAX)); } vel[index][Y] -= gravity * timestep; if (old_pos[index] != pos[index]) { RwLine line; line.start = old_pos[index]; line.end = pos[index]; if (Collision::FindNearestCollision(line, data) && data.normal.y > 0.1f) { /*matrices[index].Ident(); matrices[index].Rotate(data.normal, 90.0f);*/ collided[index] = true; pos[index] = data.point; return false; } Vertex old_start = line.start; line.start.x += v0->x; line.start.y += v0->y; line.start.z += v0->z; Vertex old_end = line.end; line.end.x += v0->x; line.end.y += v0->y; line.end.z += v0->z; if (Collision::FindNearestCollision(line, data) && data.normal.y > 0.1f) { /*matrices[index].Ident(); matrices[index].Rotate(data.normal, 90.0f);*/ collided[index] = true; pos[index] = data.point; return false; } line.start = old_start; line.start.x += v1->x; line.start.y += v1->y; line.start.z += v1->z; line.end = old_end; line.end.x += v1->x; line.end.y += v1->y; line.end.z += v1->z; if (Collision::FindNearestCollision(line, data) && data.normal.y > 0.1f) { /*matrices[index].Ident(); matrices[index].Rotate(data.normal, 90.0f);*/ collided[index] = true; pos[index] = data.point; return false; } line.start = old_start; line.start.x += v2->x; line.start.y += v2->y; line.start.z += v2->z; 
line.end = old_end; line.end.x += v2->x; line.end.y += v2->y; line.end.z += v2->z; if (Collision::FindNearestCollision(line, data) && data.normal.y > 0.1f) { /*matrices[index].Ident(); matrices[index].Rotate(data.normal, 90.0f);*/ collided[index] = true; pos[index] = data.point; return false; } } return true; } }; void ShatterSetParams(Vertex& velocity, float area_test, float velocity_variance, float spread_factor, float lifetime, float bounce, float bounce_amplitude) { //Replay::WriteShatterParams(velocity, area_test, velocity_variance, spread_factor, lifetime, bounce, bounce_amplitude); shatterVelocity = velocity; shatterAreaTest = (area_test == 0.0f) ? (DEFAULT_AREA_TEST * DEFAULT_AREA_TEST) : (area_test * area_test); shatterVelocityVariance = (velocity_variance == 0.0f) ? DEFAULT_VELOCITY_VARIANCE : velocity_variance; shatterSpreadFactor = (spread_factor == 0.0f) ? DEFAULT_SPREAD_FACTOR : spread_factor; shatterLifetime = (lifetime == 0.0f) ? DEFAULT_LIFETIME : lifetime; shatterBounce = (bounce == 0.0f) ? DEFAULT_BOUNCE : bounce; shatterBounceAmplitude = (bounce_amplitude == 0.0f) ? DEFAULT_BOUNCE_AMPLITUDE : bounce_amplitude; } std::vector<ShatterData*> shatterObjects; std::vector<SuperSector*> PointyObjects; std::vector<ColouredVertex> bbox_rails; void ShatterSuperSector(SuperSector* super_sector); bool NewShatterScript(CStruct* pStruct, CScript* pScript); /******************************************************************/ /* */ /* */ /******************************************************************/ bool subdivide_tri_stack(BYTE** p_write, SuperSector* sector, float targetShatterArea, int& numTris) { static float dividers[4] = { 0.5f, 0.6f, 0.2f, 0.33f }; float divider = dividers[0/*rand() % 3*/]; // Three temporary buffers. static BYTE v0[256]; static BYTE v1[256]; static BYTE v2[256]; // Three more temporary buffers. 
static BYTE i01[256]; static BYTE i12[256]; static BYTE i20[256]; // If there are elements on the stack, pop off the top three vertices and subdivide if necessary. if (triSubdivideStack.IsEmpty()) { debug_print("triStack empty\n"); return false; } D3DXVECTOR3* p_v0 = (D3DXVECTOR3*)v0; D3DXVECTOR3* p_v1 = (D3DXVECTOR3*)v1; D3DXVECTOR3* p_v2 = (D3DXVECTOR3*)v2; // Stack is LIFO, so Pop() off in reverse order. triSubdivideStack.Pop(p_v2); triSubdivideStack.Pop(p_v1); triSubdivideStack.Pop(p_v0); // Calculate the area of this tri. Vertex p(p_v1->x - p_v0->x, p_v1->y - p_v0->y, p_v1->z - p_v0->z); Vertex q(p_v2->x - p_v0->x, p_v2->y - p_v0->y, p_v2->z - p_v0->z); Vertex r((p[Y] * q[Z]) - (q[Y] * p[Z]), (p[Z] * q[X]) - (q[Z] * p[X]), (p[X] * q[Y]) - (q[X] * p[Y])); float area_squared = r.LengthSqr(); if (area_squared > targetShatterArea) { debug_print("subdividing tri\n"); // We need to subdivide this tri. Calculate the three intermediate points. int block_size = triSubdivideStack.GetBlockSize(); memcpy(i01, v0, block_size); memcpy(i12, v1, block_size); memcpy(i20, v2, block_size); // Deal with positions (always present). ((D3DXVECTOR3*)i01)->x = p_v0->x + ((p_v1->x - p_v0->x) * divider); ((D3DXVECTOR3*)i01)->y = p_v0->y + ((p_v1->y - p_v0->y) * divider); ((D3DXVECTOR3*)i01)->z = p_v0->z + ((p_v1->z - p_v0->z) * divider); ((D3DXVECTOR3*)i12)->x = p_v1->x + ((p_v2->x - p_v1->x) * divider); ((D3DXVECTOR3*)i12)->y = p_v1->y + ((p_v2->y - p_v1->y) * divider); ((D3DXVECTOR3*)i12)->z = p_v1->z + ((p_v2->z - p_v1->z) * divider); ((D3DXVECTOR3*)i20)->x = p_v2->x + ((p_v0->x - p_v2->x) * divider); ((D3DXVECTOR3*)i20)->y = p_v2->y + ((p_v0->y - p_v2->y) * divider); ((D3DXVECTOR3*)i20)->z = p_v2->z + ((p_v0->z - p_v2->z) * divider); //Need to learn how to check if have normals... 
if (sector->color_offset) { DWORD color_offset = (DWORD)sector->color_offset - (DWORD)sector->vertices; BYTE* p_v0col = (BYTE*)(v0 + color_offset); BYTE* p_v1col = (BYTE*)(v1 + color_offset); BYTE* p_v2col = (BYTE*)(v2 + color_offset); BYTE* p_i01col = (BYTE*)(i01 + color_offset); BYTE* p_i12col = (BYTE*)(i12 + color_offset); BYTE* p_i20col = (BYTE*)(i20 + color_offset); for (int i = 0; i < 4; ++i) { p_i01col[i] = p_v0col[i] + (((int)p_v1col[i] - (int)p_v0col[i]) / 2); p_i12col[i] = p_v1col[i] + (((int)p_v2col[i] - (int)p_v1col[i]) / 2); p_i20col[i] = p_v2col[i] + (((int)p_v0col[i] - (int)p_v2col[i]) / 2); } } // Deal with uv0 (not always present). if (sector->uv_offset) { DWORD uv_offset = (DWORD)sector->uv_offset - (DWORD)sector->vertices; float* p_v0uv = (float*)(v0 + uv_offset); float* p_v1uv = (float*)(v1 + uv_offset); float* p_v2uv = (float*)(v2 + uv_offset); float* p_i01uv = (float*)(i01 + uv_offset); float* p_i12uv = (float*)(i12 + uv_offset); float* p_i20uv = (float*)(i20 + uv_offset); for (int i = 0; i < 2; ++i) { p_i01uv[i] = p_v0uv[i] + ((p_v1uv[i] - p_v0uv[i]) * divider); p_i12uv[i] = p_v1uv[i] + ((p_v2uv[i] - p_v1uv[i]) * divider); p_i20uv[i] = p_v2uv[i] + ((p_v0uv[i] - p_v2uv[i]) * divider); } } // Push the four new tris onto the stack. triSubdivideStack.Push(v0); triSubdivideStack.Push(i01); triSubdivideStack.Push(i20); triSubdivideStack.Push(i01); triSubdivideStack.Push(v1); triSubdivideStack.Push(i12); triSubdivideStack.Push(i01); triSubdivideStack.Push(i12); triSubdivideStack.Push(i20); triSubdivideStack.Push(i20); triSubdivideStack.Push(i12); triSubdivideStack.Push(v2); } else { // Don't need to subdivide this tri. int block_size = triSubdivideStack.GetBlockSize(); // Just copy the tri into the next available slot. memcpy(*p_write, v0, block_size); *p_write += block_size; memcpy(*p_write, v1, block_size); *p_write += block_size; memcpy(*p_write, v2, block_size); *p_write += block_size; numTris++; } return true; } #endif
ef_layout.h
#pragma once

#include <folly/experimental/EliasFanoCoding.h>

#include <iostream>

#include "csr.h"
#include "ef_param.h"
#include "util.h"

// Dispatch helper passed to VS2_GEN_ARGS: expands to a call of
// get_num_chunks_vec instantiated with the (Value, SkipValue) type pair that
// matches the byte sizes chosen at runtime in set_ef_params().
#define GET_CHUNK_SIZE(V, S, ...) get_num_chunks_vec<V, S>(__VA_ARGS__)

template <size_t kSkipQuantum, size_t kForwardQuantum>
class EFGraph;

// EFLayout sizes the storage layout for an Elias-Fano-encoded adjacency
// structure built from a CSR graph: per-vertex encoded neighbour lists are
// packed into fixed-size chunks, and this class computes the chunk offsets
// and the byte sizes of each section (offsets, data, degrees, lower-bit
// counts).  The actual encoded graph is built by EFGraph (friend).
template <size_t kSkipQuantum, size_t kForwardQuantum>
class EFLayout {
  const CSR& csr_;  // source graph; referenced, not owned
  size_t num_vertices_, num_edges_;
  // Exclusive prefix sum of per-vertex chunk counts; size num_vertices_ + 1,
  // so entry i is the chunk offset of vertex i's encoded neighbour list.
  std::vector<size_t> chunk_off_exsum_;
  ef_param_t ef_param_;  // chosen integer-type and alignment sizes (bytes)
  // Byte sizes of the layout sections; storage_size_ is their sum.
  size_t offset_size_, data_size_, num_lower_bits_size_, degree_size_,
      storage_size_;

  // Computes ef_param_, chunk_off_exsum_ and all section sizes.
  void set_ef_params();

  // Size in bytes of vertex vid's neighbour list once EF-encoded.
  template <typename Value, typename SkipValue>
  size_t get_nbr_list_size(uint64_t vid) const;

  // Fills num_chunks_vec[v] with the number of chunk_align_size-byte chunks
  // needed for vertex v's encoded neighbour list.
  template <typename Value, typename SkipValue>
  void get_num_chunks_vec(
      std::vector<size_t>& num_chunks_vec, uint8_t chunk_align_size) const;

  friend class EFGraph<kSkipQuantum, kForwardQuantum>;

 public:
  // Computes the full layout eagerly at construction time.
  EFLayout(const CSR& csr)
      : csr_(csr),
        num_vertices_(csr_.get_num_vertices()),
        num_edges_(csr_.get_num_edges()) {
    set_ef_params();
  }

  ef_param_t get_ef_params() const { return ef_param_; }
  size_t get_storage_size() const { return storage_size_; }
};

// Get vid's neighbour list's size (bytes) when encoded with Elias-Fano.
template <size_t kSkipQuantum, size_t kForwardQuantum>
template <typename Value, typename SkipValue>
size_t EFLayout<kSkipQuantum, kForwardQuantum>::get_nbr_list_size(
    uint64_t vid) const {
  using namespace folly::compression;
  typedef EliasFanoEncoderV2<Value, SkipValue, kSkipQuantum, kForwardQuantum>
      Encoder;
  size_t c_size = 0;
  auto it_pair = csr_.get_nbrs(vid);
  auto start = it_pair.first;
  auto end = it_pair.second;
  if (start != end) {
    // The EF layout size depends only on the largest element (upper bound)
    // and the element count; the last neighbour is used as the maximum.
    // NOTE(review): this assumes CSR neighbour lists are sorted ascending —
    // confirm in csr.h.
    c_size = Encoder::Layout::fromUpperBoundAndSize(
                 static_cast<Value>(*(end - 1)),
                 static_cast<size_t>(end - start))
                 .bytes();
  }
  return c_size;
}

// Fill num_chunks_vec[i] with the chunk count of vertex i's encoded
// neighbour list, rounding the byte size up to whole chunks.
template <size_t kSkipQuantum, size_t kForwardQuantum>
template <typename Value, typename SkipValue>
void EFLayout<kSkipQuantum, kForwardQuantum>::get_num_chunks_vec(
    std::vector<size_t>& num_chunks_vec, uint8_t chunk_align_size) const {
  assert(num_chunks_vec.size() == num_vertices_);
#pragma omp parallel for
  for (size_t i = 0; i < num_chunks_vec.size(); i++) {
    size_t size = get_nbr_list_size<Value, SkipValue>(i);
    // Round up to whole chunks.
    num_chunks_vec[i] = (size + chunk_align_size - 1) / chunk_align_size;
  }
}

// Choose the narrowest unsigned types that can represent the graph's
// quantities, size every layout section, and record the results in
// ef_param_ / chunk_off_exsum_ / *_size_ members.
template <size_t kSkipQuantum, size_t kForwardQuantum>
void EFLayout<kSkipQuantum, kForwardQuantum>::set_ef_params() {
  uint8_t val_type_size, edge_type_size, fwd_type_size, degree_type_size,
      chunk_align_size, chunk_offset_size;
  val_type_size = get_unsigned_type_size(num_vertices_);
  edge_type_size = get_unsigned_type_size(num_edges_);
  uint64_t max_degree = csr_.get_max_degree();
  degree_type_size = get_unsigned_type_size(max_degree);
  // Forward pointers store the number of zeros.  Can be max 2n.
  // There are no forward pointers
  //   if (!kForwardQuantum || kForwardQuantum > max_degree + 1)
  // in which case fwd_type_size = sizeof(uint8_t), but this is simpler and
  // reduces compilation time since we need to instantiate fewer things.
  // fwd_type will be the same as the degree type or one type larger than
  // that.  The common case is that we have forward pointers.
  fwd_type_size = get_unsigned_type_size(max_degree * 2);
  // Right now chunks are aligned to forward values.  This can be changed
  // later to remove alignment restrictions; however, that would require
  // reading unaligned forward values in CUDA code.
  chunk_align_size = fwd_type_size;
  std::vector<size_t> num_chunks_vec(num_vertices_);
  // Get num_chunks for each neighbour list (dispatches on the runtime type
  // sizes to the right get_num_chunks_vec instantiation).
  VS2_GEN_ARGS(
      val_type_size, fwd_type_size, GET_CHUNK_SIZE, num_chunks_vec,
      chunk_align_size);
  // Exclusive prefix sum: chunk_off_exsum_[i] is the first chunk of vertex i.
  chunk_off_exsum_.resize(num_vertices_ + 1);
  chunk_off_exsum_[0] = 0;
  for (size_t i = 1; i < num_vertices_ + 1; i++) {
    chunk_off_exsum_[i] = chunk_off_exsum_[i - 1] + num_chunks_vec[i - 1];
  }
  size_t total_chunks = *chunk_off_exsum_.rbegin();
  chunk_offset_size = get_unsigned_type_size(total_chunks);
  // Section byte sizes.
  offset_size_ = chunk_off_exsum_.size() * chunk_offset_size;
  data_size_ = total_chunks * chunk_align_size;
  degree_size_ = degree_type_size * num_vertices_;
  num_lower_bits_size_ = sizeof(uint8_t) * num_vertices_;
  storage_size_ =
      offset_size_ + data_size_ + degree_size_ + num_lower_bits_size_;
  ef_param_ = {val_type_size,    edge_type_size,   fwd_type_size,
               degree_type_size, chunk_align_size, chunk_offset_size};
}
dataset.h
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_

#include <LightGBM/utils/random.h>
//#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>

#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>

namespace LightGBM {

/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training data,
*        e.g. labels, weights, initial scores, query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of the i-th query are in [query_boundaries[i], query_boundaries[i+1]).
* 4. Query Weights, calculated automatically from weights and query_boundaries
*    (if both exist): the weight for the i-th query is the sum of record weights in
*    [query_boundaries[i], query_boundaries[i+1]) divided by
*    (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If existing, the model will boost from this score,
*    otherwise it will start from 0.
*/
class Metadata {
 public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization; will load query-level information, since it is needed for sampling data
  * \param data_filename Filename of data
  * \param initscore_file Filename of initial score
  */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
  * \brief Init as a subset of another Metadata
  * \param metadata Source metadata to copy from
  * \param used_indices Indices of the rows to keep
  * \param num_used_indices Number of entries in used_indices
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initialize from binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work; will allocate space for label, weight (if exists) and query (if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means doesn't exist
  * \param query_idx Index of query id column, < 0 means doesn't exist
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of locally used rows
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if needed
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t>& used_data_indices);

  /*! \brief Replace the label array with len values copied from label */
  void SetLabel(const label_t* label, data_size_t len);

  /*! \brief Replace the weight array with len values copied from weights */
  void SetWeights(const label_t* weights, data_size_t len);

  /*! \brief Replace the query information with len values copied from query */
  void SetQuery(const data_size_t* query, data_size_t len);

  /*!
  * \brief Set initial scores
  * \param init_score Initial scores; this class will manage memory for init_score
  * \param len Number of entries in init_score
  */
  void SetInitScore(const double* init_score, data_size_t len);

  /*!
  * \brief Save binary data to file
  * \param writer Writer for the destination file
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;

  /*!
  * \brief Get size in bytes of this object
  */
  size_t SizesInByte() const;

  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }

  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }

  /*!
  * \brief Set weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }

  /*!
  * \brief Set query id for one record
  * \param idx Index of this record
  * \param value Query id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    queries_[idx] = static_cast<data_size_t>(value);
  }

  /*!
  * \brief Get weights; if they do not exist, will return nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get data boundaries on queries; if they do not exist, will return nullptr.
  *        We assume data is ordered by query; the interval
  *        [query_boundaries[i], query_boundaries[i+1]) holds the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }

  /*!
  * \brief Get weights for queries; if they do not exist, will return nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get initial scores; if they do not exist, will return nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }

  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct init-score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  bool weight_load_from_file_;
  bool query_load_from_file_;
  bool init_score_load_from_file_;
};


/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}

  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, stored as (column_idx, value) pairs
  * \param out_label Label will be stored here if it exists
  */
  virtual void ParseOneLine(const char* str,
    std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;

  virtual int TotalColumns() const = 0;

  /*!
  * \brief Create an object of parser; will auto-choose the format depending on the file
  * \param filename One filename of data
  * \param header True if the file has a header row
  * \param num_features Pass num_features of this data file if you know it, <= 0 means unknown
  * \param label_idx Index of label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};

/*!
\brief The main class of dataset,
*        which is used for training or validation.
*/
class Dataset {
 public:
  friend DatasetLoader;

  /*! \brief Empty constructor */
  LIGHTGBM_EXPORT Dataset();

  /*! \brief Construct a dataset that will hold num_data rows */
  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  /*! \brief Build the internal feature groups from the given bin mappers and
  *          sampled columns (invoked by DatasetLoader). */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
    int** sample_non_zero_indices,
    const int* num_per_col,
    size_t total_sample_cnt,
    const Config& io_config);

  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  /*! \brief Check that another dataset has the same feature layout and
  *          compatible bin mappers as this one. */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  /*! \brief Push one dense row; columns mapped to unused features are skipped.
  *          No-op once loading is finished. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  /*! \brief Push one sparse row given as (column_idx, value) pairs.
  *          Out-of-range columns and unused features are skipped. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }

  /*! \brief Push one value directly into a (group, sub_feature) slot. */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  /*! \brief Map inner (used) feature index to the original column index. */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  /*! \brief Map original column index to inner feature index (-1 if unused). */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  /*! \brief NOTE(review): "Feture" is a typo in the public name; renaming
  *          would break callers, so it is kept as-is. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }
  /*! \brief Original column indices that map to a used feature. */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }
  void ReSize(data_size_t num_data);

  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);

  LIGHTGBM_EXPORT void FinishLoad();

  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);

  /*!
  * \brief Save current dataset into binary file; will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);

  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;

  /*! \brief Partition data_indices into <=threshold / >threshold sides for the
  *          given feature; delegates to the owning feature group. */
  inline data_size_t Split(int feature,
                           const uint32_t* threshold,
                           int num_threshold,
                           bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }

  /*! \brief Bin offset of feature i inside its group: the first sub-feature
  *          of a group starts at 1, later sub-features at 0. */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Monotone constraint of feature i; 0 when no constraints are set. */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }

  /*! \brief Penalty of feature i; 1 when no penalties are set.
  *          NOTE(review): "Penalte" is a typo in the public name, kept as-is. */
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }

  /*! \brief True if any feature has a non-zero monotone constraint. */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }

  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }

  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }

  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Convert a bin threshold of feature i back to a real value. */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  /*! \brief Create an OrderedBin per feature group, in parallel. */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}

  /*! \brief Get number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }

  /*! \brief Set feature names; must match num_total_features, and spaces in
  *          names are replaced with underscores (with a warning). */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name: feature_names_) {
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief Per-column bin-mapper descriptions; "none" for unused columns. */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }

  /*! \brief Get number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token used for the binary file format.
  *          NOTE(review): purpose inferred from the name; confirm in dataset.cpp */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Inner feature index -> original column index */
  std::vector<int> real_feature_idx_;
  /*! \brief Inner feature index -> owning group */
  std::vector<int> feature2group_;
  /*! \brief Inner feature index -> sub-feature index within its group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin boundaries per group */
  std::vector<uint64_t> group_bin_boundaries_;
  std::vector<int> group_feature_start_;
  std::vector<int> group_feature_cnt_;
  /*! \brief Per-feature monotone constraints (empty if none) */
  std::vector<int8_t> monotone_types_;
  /*! \brief Per-feature penalties (empty if none) */
  std::vector<double> feature_penalty_;
  /*! \brief True once FinishLoad has been called; pushes become no-ops */
  bool is_finish_load_;
};

}  // namespace LightGBM

#endif   // LightGBM_DATA_H_
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative.
   */
  return x->tv_sec < y->tv_sec;
}

/*
 * Benchmark driver: allocates two time planes of an Nz x Ny x Nx grid,
 * fills plane 0 with pseudo-random values, then runs the tiled 7-point
 * stencil sweep TESTS times and reports the per-run and minimum wall time.
 *
 * Usage: prog Nx Ny Nz Nt  (each grid dimension is padded by 2 for halos).
 *
 * NOTE(review): if argc <= 3 then Nx/Ny/Nz — and if argc <= 4 then Nt —
 * are read uninitialized below (undefined behavior). The program only works
 * when all four arguments are supplied; consider validating argc and
 * exiting with a usage message.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes (A[t%2]) of pointers-to-rows layout.
   * NOTE(review): malloc results are unchecked throughout; a failed
   * allocation will crash on first use. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // NOTE(review): tile_size is consumed by the code generator, not read at
  // runtime, and is never freed (small one-time leak).
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // 7-point stencil coefficients
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  //
  // NOTE(review): loops start at 1, so the i=0 / j=0 / k=0 boundary planes of
  // A[0] are left uninitialized; confirm the stencil's halo handling covers
  // this (interior updates below only read indices >= 1).
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* NOTE(review): a block of glibc header boilerplate comments (license
     * text from <features.h>-adjacent headers) was inlined here by the
     * source-to-source tool; it carried no information about this code and
     * has been condensed to this note. */

    /* Tile-loop iterators generated by PLUTO/CLooG.
     * NOTE(review): t, lb, ub, lb2, ub2 are declared but unused. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    /* Diamond/hybrid-tiled sweep over time (t5) and space (t6,t7,t8), with
     * tile sizes 24x24x32x2048 baked into the bounds. A[(t5+1)%2] is written
     * from A[t5%2] (double buffering over time). Do not hand-edit the bound
     * expressions: they are generated and order-sensitive. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(32*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(32*t3+Nx+28,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/module.h" #include "magick/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 
4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } 
}, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { 
{ { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { 
{ 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 
31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 
0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } 
}, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } 
}, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } 
}, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1)

/*
  Force the alpha interval [min,max] into a usable state: ordered, and at
  least `steps` levels wide within 0..255.  NOTE(review): multi-statement
  macro not wrapped in do { ... } while (0) — only safe when expanded as a
  complete statement, never as the body of an unbraced if/else; confirm all
  call sites before changing.
*/
#define FixRange(min, max, steps) \
if (min > max) \
  min = max; \
if ((ssize_t) max - min < steps) \
  max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
  min = MagickMax(0, (ssize_t) max - steps)

/*
  3-component dot product of two vector structs.  NOTE(review): the
  expansion lacks one outer pair of parentheses, so the macro is only safe
  as a complete expression (which is how the callers below use it).
*/
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)

/* Set all four (resp. three) components of a vector to `value`. */
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value

/* True when a DDSPixelFormat carries exactly the given channel bit masks. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)

/*
  Forward declarations.
*/
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *),
  WriteMipmaps(Image *,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);

static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3,
    unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);

/* destination = left + right, componentwise over x,y,z,w. */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x =
left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp each of x,y,z,w into [0,1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp each of x,y,z into [0,1]. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy the x,y,z components of a 4-vector into a 3-vector (w dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components of a 4-vector. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a*b, componentwise. */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* destination = left * right, componentwise over x,y,z,w. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* destination = left * right, componentwise over x,y,z. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* destination = a*b + c, componentwise over x,y,z,w. */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* destination = a*b + c, componentwise over x,y,z. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/*
  destination = 1/value, componentwise.  NOTE(review): no guard against a
  zero component; callers must ensure every lane of `value` is non-zero.
*/
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* destination = left - right, componentwise over x,y,z,w. */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* destination = left - right, componentwise over x,y,z. */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Round each component toward zero (floor if positive, else ceil). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* Round each of x,y,z toward zero. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z);
}

/*
  Expand the two 5:6:5 block endpoints c0/c1 to 8-bit RGB and derive the two
  interpolated codebook entries.  When c0 <= c1 and alpha is honored, the
  three-color mode is used: slot 2 is the midpoint and slot 3 is zeroed with
  a[3] set to 255.  NOTE(review): callers appear to treat a[3] == 255 as the
  DXT1 transparency marker — confirm at the decode sites.
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* Four-color mode: interpolants at 1/3 and 2/3 between endpoints. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* Three-color mode: slot 2 is the midpoint of the endpoints. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  Quantize 16 alpha samples against the 8-entry alpha codebook spanned by
  min/max with `steps` interpolated levels (codes 6 and 7 fixed at 0 and
  255).  Writes the chosen code per texel into indices[] and returns the
  summed squared error.  alphas[i] == -1 marks a texel to skip (index 0).
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  register ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      /*
        Unsigned subtraction may wrap, but (value-code)^2 mod 2^N equals
        (code-value)^2 mod 2^N, so the squared distance is still correct.
      */
      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

/*
  Exhaustive cluster-fit style search for the best pair of 5:6:5 block
  endpoints: points are ordered along the principle axis and every split of
  that ordering into the four code weights is scored (body continues below).
*/
static void CompressClusterFit(const size_t count, const DDSVector4 *points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Constant weight vectors; .w lanes carry the squared weights. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* 5:6:5 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  /* Initial ordering of the points along the principle axis. */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  /*
    Iteratively refine: score every (i,j,k) split of the current ordering,
    then re-order along the best axis found, for at most 8 iterations.
  */
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      /* part0 accumulates the weights of points assigned code 0. */
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }

        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = weights of the remaining points (code 1). */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          /* Least-squares endpoints for this split (closed form). */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both candidate endpoints onto the 5:6:5 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);

          /* Accumulate the error terms, weighted by `metric`. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-check inside the critical section (racy first test). */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    /* Stop when an iteration fails to improve on the previous best. */
    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }

  /* Translate the winning (besti,bestj,bestk) split into per-point codes. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for
(i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}

/*
  Fast endpoint fit: project all points onto the principle axis, take the
  extremes as start/end, snap them to the 5:6:5 grid, build the 4-entry
  codebook, and assign each point the nearest code under the channel
  weighting in `metric`.
*/
static void CompressRangeFit(const size_t count, const DDSVector4 *points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  /* 5:6:5 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      /* Track the points with the min/max projection onto the axis. */
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints onto the 5:6:5 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* Codebook: the endpoints plus interpolants at 1/3 and 2/3. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}

/*
  Pick the best single-color endpoints from the per-channel lookup tables
  (body continues below).
*/
static void ComputeEndPoints(const
DDSSingleColourLookup *lookup[], const unsigned char *color,
  DDSVector3 *start, DDSVector3 *end, unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;  /* running best (minimum) error; name is historical */

  /* Try both candidate source blocks from the single-color lookup tables */
  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    /* Rescale table endpoints from 5/6/5-bit integers into [0,1] floats */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}

/*
  Approximate the dominant eigenvector of the 3x3 covariance matrix with
  8 rounds of power iteration; used as the principal axis for compression.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  /* covariance[] holds the symmetric matrix packed as its 6 unique entries */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest component so the iterate never overflows */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}

/*
  Weighted covariance of the color points about their weighted centroid;
  the result is packed into covariance[0..5].
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  total = 0.0f;
  VectorInit3(centroid,0.0f);

  /* .w carries each point's weight */
  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Guard against a zero total weight (threshold is FLT_EPSILON) */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    /* Accumulate the upper triangle of the symmetric 3x3 matrix */
    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}

/*
  Sort the points by their projection on `axis` (insertion sort), record the
  permutation in order[], and build the weighted points plus their running
  sum.  Returns MagickFalse when this ordering duplicates one produced by an
  earlier iteration, letting cluster fit terminate early.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort keyed by projection, permuting order[] in lockstep */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Bail out if an identical ordering was already tried */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    /* Pre-weighted point, emitted in sorted order */
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s D D S                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsDDS() returns MagickTrue if the image format type, identified by
the
%  magick string, is DDS.
%
%  The format of the IsDDS method is:
%
%      MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /* A DDS file starts with the four-byte magic "DDS " */
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d D D S I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadDDSImage() reads a DirectDraw Surface image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status,
    cubemap = MagickFalse,
    volume = MagickFalse,
    matte;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }

  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /* File layout: 4-byte magic + 124-byte header; pixel data starts at 128 */
  (void) SeekBlob(image, 128, SEEK_SET);

  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          matte = MagickTrue;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          /* NOTE(review): matte is set to MagickTrue here although this
             branch has no alpha channel -- looks like it should be
             MagickFalse; confirm intended behavior. */
          matte = MagickTrue;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
   {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          matte = MagickFalse;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          matte = MagickFalse;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          matte = MagickTrue;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          matte = MagickTrue;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume)
    num_images = dds_info.depth;

  /* Sanity check the frame count against the file size */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");

        /* Start a new image */
        AcquireNextImage(image_info,image);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->matte = matte;
    image->compression = compression;
    image->columns = dds_info.width;
    image->rows = dds_info.height;
    image->storage_class = DirectClass;
    image->endian = LSBEndian;
    image->depth = 8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows);
    if (status == MagickFalse)
      {
        InheritException(exception,&image->exception);
        return(DestroyImageList(image));
      }
    (void) SetImageBackgroundColor(image);
    /* Decode this frame with the format-specific decoder chosen above */
    if ((decoder)(image, &dds_info, exception) != MagickTrue)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }

  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
  Parse the 124-byte DDS_HEADER (which follows the 4-byte magic) into
  dds_info; returns MagickFalse on any structural mismatch.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /*
    Seek to start of header
  */
  (void) SeekBlob(image, 4, SEEK_SET);

  /*
    Check header field
  */
  hdr_size =
ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /*
    Fill in DDS info struct
  */
  dds_info->flags = ReadBlobLSBLong(image);

  /*
    Check required flags
  */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR);   /* reserved region of 11 DWORDs */

  /*
    Read pixel format structure
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR);   /* 3 reserved DWORDs */

  return MagickTrue;
}

/*
  Decode DXT1 (BC1) data: 8-byte blocks of two RGB565 endpoints plus 2-bit
  interpolation codes per texel.
*/
static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  PixelPacket
    *q;

  register ssize_t
    i,
    x;

  size_t
    bits;

  ssize_t
    j,
    y;

  unsigned char
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /*
        Get 4x4 patch of pixels to write on
      */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /*
        Read 8 bytes of data from the image
      */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickFalse);
      if
 (EOFBlob(image) != MagickFalse)
        break;

      /*
        Write the pixels
      */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if (((x + i) < (ssize_t) image->columns) &&
              ((y + j) < (ssize_t) image->rows))
            {
              /* Two bits per texel select one of the 4 derived colors */
              code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
              SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
              if ((colors.a[code] != 0) &&
                  (image->matte == MagickFalse))
                image->matte=MagickTrue;  /* Correct matte */
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }

    if (EOFBlob(image) != MagickFalse)
      break;
  }

  /* DXT1 uses 8 bytes per compressed 4x4 block */
  return(SkipDXTMipmaps(image,dds_info,8,exception));
}

/*
  Decode DXT3 (BC2) data: 64 bits of explicit 4-bit alpha followed by a
  DXT1-style color block.
*/
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  ssize_t
    j,
    y;

  PixelPacket
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /*
        Get 4x4 patch of pixels to write on
      */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);
      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /*
        Read alpha values (8 bytes)
      */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /*
        Read 8 bytes of data from the image
      */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        break;

      /*
        Write the pixels
      */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) dds_info->width &&
              (y + j) < (ssize_t) dds_info->height)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));

              /* Extract alpha value:
multiply 0..15 by 17 to get range 0..255 */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha));
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }

    if (EOFBlob(image) != MagickFalse)
      break;
  }

  /* DXT3 uses 16 bytes per compressed 4x4 block */
  return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode DXT5 (BC3) data: two 8-bit alpha endpoints with 48 bits of 3-bit
  interpolation codes, followed by a DXT1-style color block.
*/
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  ssize_t
    j,
    y;

  MagickSizeType
    alpha_bits;

  PixelPacket
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /*
        Get 4x4 patch of pixels to write on
      */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);
      if (q == (PixelPacket *) NULL)
        return MagickFalse;

      /*
        Read alpha values (8 bytes): 2 endpoints + 48 bits of 3-bit codes
      */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);

      /*
        Read 8 bytes of data from the image
      */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        break;

      /*
        Write the pixels
      */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) dds_info->width &&
              (y + j) < (ssize_t) dds_info->height)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
              SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
              SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));

              /* Extract alpha value */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha));
              q++;
            }
        }
      }

      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }

    if (EOFBlob(image) != MagickFalse)
      break;
  }

  /* DXT5 uses 16 bytes per compressed 4x4 block */
  return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode uncompressed pixel data: 8-bit gray, 16-bit RGB565, or 24/32-bit
  BGR(X) scanlines.
*/
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);

  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
    if (q == (PixelPacket *) NULL)
      return MagickFalse;

    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image)));
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* RGB565: expand each bitfield to the 0..255 range */
          color=ReadBlobShort(image);
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)));
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)));
        }
      else
        {
          /* 24/32-bit data is stored BGR(X) */
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      SetPixelAlpha(q,QuantumRange);
      q++;
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }

  return(SkipRGBMipmaps(image,dds_info,3,exception));
}

/*
  Decode uncompressed data carrying alpha: 16-bit ARGB1555/ARGB4444/
  gray+alpha, or 32-bit BGRA.
*/
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;   /* ARGB1555 */
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;   /* 8-bit gray + 8-bit alpha */
          (void) SetImageType(image,GrayscaleMatteType);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;   /* ARGB4444 */
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }

  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
    if (q == (PixelPacket *) NULL)
      return MagickFalse;

    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1-bit alpha: fully opaque or fully transparent */
              SetPixelAlpha(q,(color & (1 << 15)) ?
QuantumRange : 0);
              SetPixelRed(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)));
              SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)));
              SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)));
            }
          else if (alphaBits == 2)
            {
              /* High byte = alpha, low byte = gray */
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                (color >> 8)));
              SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
            }
          else
            {
              /* ARGB4444: expand each nibble to 0..255 */
              SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)));
              SetPixelRed(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)));
              SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)));
              SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)));
            }
        }
      else
        {
          /* 32-bit data is stored BGRA */
          SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelRed(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
          SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)));
        }
      q++;
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }

  return(SkipRGBMipmaps(image,dds_info,4,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r D D S I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDDSImage() adds attributes for the DDS image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  /* DDS, DXT1 and DXT5 all map to the same read/write handlers */
  entry = SetMagickInfo("DDS");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->seekable_stream=MagickTrue;
  entry->description = ConstantString("Microsoft DirectDraw Surface");
  entry->module = ConstantString("DDS");
  (void) RegisterMagickInfo(entry);

  entry = SetMagickInfo("DXT1");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->seekable_stream=MagickTrue;
  entry->description = ConstantString("Microsoft DirectDraw Surface");
  entry->module = ConstantString("DDS");
  (void) RegisterMagickInfo(entry);

  entry = SetMagickInfo("DXT5");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->seekable_stream=MagickTrue;
  entry->description = ConstantString("Microsoft DirectDraw Surface");
  entry->module = ConstantString("DDS");
  (void) RegisterMagickInfo(entry);

  return(MagickImageCoderSignature);
}

/*
  Translate per-cluster indices back into block pixel order; texels without a
  mapping (map[i] == -1, i.e. transparent under DXT1) get index 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  register ssize_t
    i;

  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] = source[map[i]];
  }
}

/*
  Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  register ssize_t
    i;

  MagickOffsetType
    offset;

  size_t
    h,
    w;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 &
DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* DXT blocks cover 4x4 texels; round dimensions up to whole blocks */
        offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        if ((w == 1) && (h == 1))
          break;
        w = DIV2(w);
        h = DIV2(h);
      }
    }
  return(MagickTrue);
}

/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  MagickOffsetType
    offset;

  register ssize_t
    i;

  size_t
    h,
    w;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset = (MagickOffsetType) w * h * pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w = DIV2(w);
        h = DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r D D S I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported formats.
%
%  The format of the UnregisterDDSImage method is:
%
%      UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  (void) UnregisterMagickInfo("DDS");
  (void) UnregisterMagickInfo("DXT1");
  (void) UnregisterMagickInfo("DXT5");
}

/*
  Choose between the 5-alpha and 7-alpha DXT5 encodings (whichever has the
  lower error) and write the 8-byte alpha block: two endpoints followed by
  48 bits of 3-bit indices.
*/
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  /* NOTE(review): min5/max5/min7/max7 are by-value parameters; FixRange can
     only adjust them if it is a macro -- confirm its definition. */
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);

  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);

  if (err7 < err5)
    {
      /* 7-alpha mode wins: remap its indices into the 5-alpha index space */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }

      min5 = max7;
      max5 = min7;
    }

  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);

  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    /* Pack eight 3-bit indices into 24 bits, emitted as 3 bytes */
    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }

    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}

/*
  Compress one 4x4 block of colors and write the 8-byte DXT color block,
  using cluster fit when requested and possible, otherwise range fit.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);

  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);

  if (clusterFit == MagickFalse || count == 0)
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);

  WriteIndices(image,start,end,indices);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e D D S I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
%  The format of the WriteDDSImage method is:
%
%      MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace);

  /* Default to DXT5; fall back to DXT1 when there is no alpha channel */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (!image->matte)
    compression=FOURCC_DXT1;

  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;

  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }

  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;

  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          /* Alpha weighting only applies when alpha is actually encoded */
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }

  maxMipmaps=SIZE_MAX;
  mipmaps=0;
  /* Mipmaps are only generated for power-of-two dimensions */
  if ((image->columns & (image->columns - 1)) == 0 &&
      (image->rows & (image->rows - 1)) == 0)
    {
      option=GetImageOption(image_info,"dds:mipmaps");
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);

      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }

  WriteDDSInfo(image,pixelFormat,compression,mipmaps);

  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    &image->exception);

  if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
      clusterFit,weightByAlpha,&image->exception) == MagickFalse)
    return(MagickFalse);

  (void) CloseBlob(image);
  return(MagickTrue);
}

/*
  Emit the 128-byte DDS header: magic + DDS_HEADER + DDS_PIXELFORMAT.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MaxTextExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;

  if (format == DDPF_FOURCC)
      flags=flags | DDSD_LINEARSIZE;
  else
      flags=flags | DDSD_PITCH;

  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }

  if (format != DDPF_FOURCC && image->matte)
    format=format | DDPF_ALPHAPIXELS;

  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);

  if (pixelFormat == DDPF_FOURCC)
    {
      /*
        Compressed DDS requires linear compressed size of first image
      */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /*
        Uncompressed DDS requires byte pitch of first image
      */
      if
(image->matte != MagickFalse) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) ResetMagickMemory(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) // bitcount / masks (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->matte != MagickFalse) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) // ddscaps2 + reserved region (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t i, y, bx, by; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const 
PixelPacket *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f; point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f; p++; match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } 
static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char* indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { Image* resize_image; register ssize_t i; size_t columns, rows; columns = image->columns; rows = image->rows; for (i=0; i< (ssize_t) mipmaps; i++) { resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,1.0, exception); if (resize_image == (Image *) NULL) return(MagickFalse); DestroyBlob(resize_image); resize_image->blob=ReferenceBlob(image->blob); WriteImageData(resize_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); resize_image=DestroyImage(resize_image); columns = DIV2(columns); rows = DIV2(rows); } return(MagickTrue); } static void WriteSingleColorFit(Image *image, const DDSVector4* points, const ssize_t* map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) 
ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p))); if (image->matte) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p))); p++; } } }
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef BUILDER_H_
#define BUILDER_H_

#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>

#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"


/*
GAP Benchmark Suite
Class:  BuilderBase
Author: Scott Beamer

Given arguments from the command line (cli), returns a built graph
 - MakeGraph() will parse cli and obtain edgelist and call
   MakeGraphFromEL(edgelist) to perform actual graph construction
 - edgelist can be from file (reader) or synthetically generated (generator)
 - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/


template <typename NodeID_, typename DestID_ = NodeID_,
          typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
  typedef EdgePair<NodeID_, DestID_> Edge;
  typedef pvector<Edge> EdgeList;

  const CLBase &cli_;
  bool symmetrize_;        // if true, build an undirected (symmetric) graph
  bool needs_weights_;     // true when DestID_ carries a weight payload
  int64_t num_nodes_ = -1; // -1 until discovered via FindMaxNodeID

 public:
  explicit BuilderBase(const CLBase &cli) : cli_(cli) {
    symmetrize_ = cli_.symmetrize();
    // weights are needed exactly when destination type differs from NodeID_
    needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
  }

  // Unweighted edge: the "source" stored in a transposed CSR is just e.u.
  DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
    return e.u;
  }

  // Weighted edge: pair the source vertex with the edge's weight so the
  // transposed (incoming) CSR keeps the weight alongside the endpoint.
  DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
    return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
  }

  // Largest node ID appearing in the edge list; graph has max+1 nodes.
  NodeID_ FindMaxNodeID(const EdgeList &el) {
    NodeID_ max_seen = 0;
    #pragma omp parallel for reduction(max : max_seen)
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      max_seen = (std::max)(max_seen, e.u);
      max_seen = (std::max)(max_seen, (NodeID_) e.v);
    }
    return max_seen;
  }

  // Per-node degree counts for the requested direction.  When symmetrizing,
  // every edge contributes to both endpoints; fetch_and_add makes the
  // parallel increments race-free.
  pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
    pvector<NodeID_> degrees(num_nodes_, 0);
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      if (symmetrize_ || (!symmetrize_ && !transpose))
        fetch_and_add(degrees[e.u], 1);
      if (symmetrize_ || (!symmetrize_ && transpose))
        fetch_and_add(degrees[(NodeID_) e.v], 1);
    }
    return degrees;
  }

  // Serial exclusive prefix sum; sums[i] is the offset of node i, and the
  // extra trailing entry holds the grand total.
  static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) {
    pvector<SGOffset> sums(degrees.size() + 1);
    SGOffset total = 0;
    for (size_t n=0; n < degrees.size(); n++) {
      sums[n] = total;
      total += degrees[n];
    }
    sums[degrees.size()] = total;
    return sums;
  }

  // Two-pass parallel exclusive prefix sum: per-block partial sums first,
  // then a serial scan over blocks, then a parallel fill within each block.
  static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) {
    const size_t block_size = 1<<20;
    const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
    pvector<SGOffset> local_sums(num_blocks);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset lsum = 0;
      size_t block_end = (std::min)((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++)
        lsum += degrees[i];
      local_sums[block] = lsum;
    }
    pvector<SGOffset> bulk_prefix(num_blocks+1);
    SGOffset total = 0;
    for (size_t block=0; block < num_blocks; block++) {
      bulk_prefix[block] = total;
      total += local_sums[block];
    }
    bulk_prefix[num_blocks] = total;
    pvector<SGOffset> prefix(degrees.size() + 1);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset local_total = bulk_prefix[block];
      size_t block_end = (std::min)((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++) {
        prefix[i] = local_total;
        local_total += degrees[i];
      }
    }
    prefix[degrees.size()] = bulk_prefix[num_blocks];
    return prefix;
  }

  // Removes self-loops and redundant edges
  // Side effect: neighbor IDs will be sorted
  void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
                 DestID_*** sq_index, DestID_** sq_neighs) {
    pvector<NodeID_> diffs(g.num_nodes());
    DestID_ *n_start, *n_end;
    // pass 1: sort each neighbor list in place, drop duplicates and
    // self-loops, and record the surviving count per node
    #pragma omp parallel for private(n_start, n_end)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose) {
        n_start = g.in_neigh(n).begin();
        n_end = g.in_neigh(n).end();
      } else {
        n_start = g.out_neigh(n).begin();
        n_end = g.out_neigh(n).end();
      }
      std::sort(n_start, n_end);
      DestID_ *new_end = std::unique(n_start, n_end);
      new_end = std::remove(n_start, new_end, n);
      diffs[n] = new_end - n_start;
    }
    // pass 2: allocate compacted storage and copy survivors over
    pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
    *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
    *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
    #pragma omp parallel for private(n_start)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose)
        n_start = g.in_neigh(n).begin();
      else
        n_start = g.out_neigh(n).begin();
      std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
    }
  }

  // Rebuilds the graph with deduplicated, self-loop-free, sorted neighbor
  // lists; squishes the incoming CSR too when the graph is directed.
  // NOTE(review): when g is directed and invert is false, in_index/in_neighs
  // are passed uninitialized — presumably the inverted CSRGraph constructor
  // ignores them in that configuration; verify against graph.h.
  CSRGraph<NodeID_, DestID_, invert> SquishGraph(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
    SquishCSR(g, false, &out_index, &out_neighs);
    if (g.directed()) {
      if (invert)
        SquishCSR(g, true, &in_index, &in_neighs);
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs, in_index,
                                                in_neighs);
    } else {
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs);
    }
  }

  /*
  Graph Building Steps (for CSR):
    - Read edgelist once to determine vertex degrees (CountDegrees)
    - Determine vertex offsets by a prefix sum (ParallelPrefixSum)
    - Allocate storage and set points according to offsets (GenIndex)
    - Copy edges into storage
  */
  void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
               DestID_** neighs) {
    pvector<NodeID_> degrees = CountDegrees(el, transpose);
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    *neighs = new DestID_[offsets[num_nodes_]];
    *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
    // offsets[] doubles as a per-node write cursor; fetch_and_add claims a
    // unique slot per edge so the parallel scatter is race-free
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      if (symmetrize_ || (!symmetrize_ && !transpose))
        (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
      if (symmetrize_ || (!symmetrize_ && transpose))
        (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
            GetSource(e);
    }
  }

  // Full construction from an in-memory edge list: size discovery, optional
  // weight generation, outgoing CSR, and (for directed graphs) incoming CSR.
  CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
    DestID_ **index = nullptr, **inv_index = nullptr;
    DestID_ *neighs = nullptr, *inv_neighs = nullptr;
    Timer t;
    t.Start();
    if (num_nodes_ == -1)
      num_nodes_ = FindMaxNodeID(el)+1;
    if (needs_weights_)
      Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
    MakeCSR(el, false, &index, &neighs);
    if (!symmetrize_ && invert)
      MakeCSR(el, true, &inv_index, &inv_neighs);
    t.Stop();
    PrintTime("Build Time", t.Seconds());
    if (symmetrize_)
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
    else
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
                                                inv_index, inv_neighs);
  }

  // Entry point: obtain an edge list (file reader or synthetic generator)
  // per the CLI, build the graph, then squish it.  Serialized .sg/.wsg
  // inputs bypass construction entirely.
  CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
    CSRGraph<NodeID_, DestID_, invert> g;
    {  // extra scope to trigger earlier deletion of el (save memory)
      EdgeList el;
      if (cli_.filename() != "") {
        Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
        if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) {
          return r.ReadSerializedGraph();
        } else {
          el = r.ReadFile(needs_weights_);
        }
      } else if (cli_.scale() != -1) {
        Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
        el = gen.GenerateEL(cli_.uniform());
      }
      g = MakeGraphFromEL(el);
    }
    return SquishGraph(g);
  }

  // Relabels (and rebuilds) graph by order of decreasing degree
  static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    if (g.directed()) {
      std::cout << "Cannot relabel directed graph" << std::endl;
      std::exit(-11);
    }
    Timer t;
    t.Start();
    // sort (degree, node) pairs descending to assign dense new IDs by rank
    typedef std::pair<int64_t, NodeID_> degree_node_p;
    pvector<degree_node_p> degree_id_pairs(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++)
      degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
    std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
              std::greater<degree_node_p>());
    pvector<NodeID_> degrees(g.num_nodes());
    pvector<NodeID_> new_ids(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      degrees[n] = degree_id_pairs[n].first;
      new_ids[degree_id_pairs[n].second] = n;
    }
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
    DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
    #pragma omp parallel for
    for (NodeID_ u=0; u < g.num_nodes(); u++) {
      for (NodeID_ v : g.out_neigh(u))
        neighs[offsets[new_ids[u]]++] = new_ids[v];
      // index[k+1] is the start of node k+1's list, i.e. the end of node k's
      std::sort(index[new_ids[u]], index[new_ids[u]+1]);
    }
    t.Stop();
    PrintTime("Relabel", t.Seconds());
    return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
  }
};

#endif  // BUILDER_H_
GB_binop__isne_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every kernel body below comes from a shared *_template.c
// file, specialized by the GB_* macros defined here for the ISNE operator on
// int16_t.  Any behavioral change belongs in the Generator/ sources, not in
// this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

//      A+B function (eWiseAdd):         GB (_AaddB__isne_int16)
//      A.*B function (eWiseMult):       GB (_AemultB_01__isne_int16)
//      A.*B function (eWiseMult):       GB (_AemultB_02__isne_int16)
//      A.*B function (eWiseMult):       GB (_AemultB_03__isne_int16)
//      A.*B function (eWiseMult):       GB (_AemultB_bitmap__isne_int16)
//      A*D function (colscale):         GB (_AxD__isne_int16)
//      D*A function (rowscale):         GB (_DxB__isne_int16)
//      C+=B function (dense accum):     GB (_Cdense_accumB__isne_int16)
//      C+=b function (dense accum):     GB (_Cdense_accumb__isne_int16)
//      C+=A+B function (dense ewise3):  GB ((none))
//      C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isne_int16)
//      C=scalar+B                       GB (_bind1st__isne_int16)
//      C=scalar+B'                      GB (_bind1st_tran__isne_int16)
//      C=A+scalar                       GB (_bind2nd__isne_int16)
//      C=A'+scalar                      GB (_bind2nd_tran__isne_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (ISNE is none of these, so this kernel is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isne_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isne_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // Bb is the bitmap of B; skip entries not present
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Ab is the bitmap of A; skip entries not present
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ;              \
}

GrB_Info GB (_bind1st_tran__isne_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ;              \
}

GrB_Info GB (_bind2nd_tran__isne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mobilenet_128.c
/* Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API GitHUB Page: https://github.com/jcanore/vgg16 Author: ZFTurbo/jocare Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)> Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1 */ #include <ctype.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> double get_seconds(struct timeval tStart, struct timeval tEnd) { return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6; } #define SIZE 128 #define CONV_SIZE 3 #define CONV_LEVELS 27 //#define _CRT_SECURE_NO_WARNINGS 1 // precompile variables // assure default values if nothing provided #ifndef SPARSE_CONVOLUTIONS #define SPARSE_CONVOLUTIONS 0 // default dense convolutions #endif // SPARSE_CONVOLUTIONS #ifndef FIRST_CONV_SPARSE #define FIRST_CONV_SPARSE 0 // this is almost never 1 #endif // FIRST_CONV_SPARSE #ifndef SPARSE_FULLY_CONNECTED #define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet #endif // SPARSE_FULLY_CONNECTED #ifndef FISHER_PRUNING #define FISHER_PRUNING \ 0 // set for fisher pruning, all previous variables changed to dense #endif // FISHER_PRUNING #ifndef NUMBER_OF_THREADS #define NUMBER_OF_THREADS 1 // number of threads to run on //#define NUMBER_OF_THREADS omp_get_num_procs() - 1 #endif // NUMBER_OF_THREADS static double pw_conv_time = 0.0; static double dense_time = 0.0; /****************************************************************************************************************************/ int im_sizes[27] = {128, 128, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2}; int strides[26] = {1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 
1, 1}; int mem_block_shape[3] = { 1024, 128, 128}; // allocate the absolute maximum amount of space we will need float ***block1; float ***block2; float *****wc; // weights convolution float ***wd; // weights dense float **bd; // biases dense float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to // estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2}; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions /* // ORIGINAL FISHER EXPERIMENTS int cshape[27][4] = { { 128, 3, CONV_SIZE, CONV_SIZE }, { 128, 1, CONV_SIZE, CONV_SIZE }, { 43, 128, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; */ // FIXED 90% ACCURACY EXPERIMENTS int cshape[27][4] = {{128, 3, CONV_SIZE, CONV_SIZE}, {128, 1, CONV_SIZE, CONV_SIZE}, {43, 128, 1, 1}, {43, 1, CONV_SIZE, CONV_SIZE}, {85, 43, 1, 1}, {85, 1, CONV_SIZE, CONV_SIZE}, {70, 85, 1, 1}, {70, 1, CONV_SIZE, CONV_SIZE}, {150, 70, 1, 1}, {150, 1, CONV_SIZE, CONV_SIZE}, {69, 150, 1, 1}, {69, 1, CONV_SIZE, CONV_SIZE}, {188, 69, 1, 1}, {188, 1, CONV_SIZE, CONV_SIZE}, {72, 188, 1, 1}, {72, 1, CONV_SIZE, 
CONV_SIZE}, {122, 72, 1, 1}, {122, 1, CONV_SIZE, CONV_SIZE}, {106, 122, 1, 1}, {106, 1, CONV_SIZE, CONV_SIZE}, {96, 106, 1, 1}, {96, 1, CONV_SIZE, CONV_SIZE}, {81, 96, 1, 1}, {81, 1, CONV_SIZE, CONV_SIZE}, {75, 81, 1, 1}, {75, 1, CONV_SIZE, CONV_SIZE}, {100, 75, 1, 1} }; int dshape[1][2] = {{100, 10}}; #else // PLAIN int cshape[27][4] = {{128, 3, CONV_SIZE, CONV_SIZE}, {128, 1, CONV_SIZE, CONV_SIZE}, {64, 128, 1, 1}, {64, 1, CONV_SIZE, CONV_SIZE}, {128, 64, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {128, 128, 1, 1}, {128, 1, CONV_SIZE, CONV_SIZE}, {256, 128, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {256, 256, 1, 1}, {256, 1, CONV_SIZE, CONV_SIZE}, {512, 256, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {512, 512, 1, 1}, {512, 1, CONV_SIZE, CONV_SIZE}, {1024, 512, 1, 1}, {1024, 1, CONV_SIZE, CONV_SIZE}, {1024, 1024, 1, 1}}; int dshape[1][2] = {{1024, 10}}; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 128; block1 = malloc(max_channels * sizeof(float **)); block2 = malloc(max_channels * sizeof(float **)); // allocate block memory for (i 
= 0; i < max_channels; i++) { block1[i] = malloc(max_im_size * sizeof(float *)); block2[i] = malloc(max_im_size * sizeof(float *)); for (j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t ****)malloc(CONV_LEVELS * sizeof(csr_t ***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t ***)malloc(cshape[l][0] * sizeof(csr_t **)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t **)malloc(cshape[l][1] * sizeof(csr_t *)); } } // wc memory allocated below will be freed in read_weights if // SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float ****)); // allocate kernel memory for (l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float ***)); for (i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float *)); for (k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float *)); batchnorm_biases = malloc(27 * sizeof(float *)); batchnorm_means = malloc(27 * sizeof(float *)); batchnorm_vars = malloc(27 * sizeof(float *)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float **)); bd = malloc(1 * sizeof(float *)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float *)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); 
mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; 
z++) { // batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); // printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); // printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { // printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); csr_t *a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); // print_csr(a); wc_sparse[l][i][j] = a; // printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, // cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float *****)malloc(1 * sizeof(float ****)); wc_first_conv[l] = (float ****)malloc(cshape[l][0] * sizeof(float ***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float ***)malloc(cshape[l][1] * sizeof(float **)); for (j = 0; j < 
cshape[l][1]; j++) { wc_first_conv[l][i][j] = (float **)malloc(cshape[l][2] * sizeof(float *)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float *)malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created // above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size + 2][size + 2]; memset(zeropad, 0, ((size + 2) * (size + 2) * sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i = i + stride) { for (j = 0; j < size; j = j + stride) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 
1][j + 2] * kernel[1][2] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } /****************************************************************************************************************************/ /****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { struct timeval start, end; gettimeofday(&start, NULL); int i, j, k, l; float sum; for (i = 0; i < output_channels; i++) { for (j = 0; j < image_size; j++) { for (k = 0; k < image_size; k++) { sum = 0.; for (l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0] [0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } gettimeofday(&end, NULL); pw_conv_time += get_seconds(start, end); } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b #pragma omp parallel for private(channel, i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (channel = 0; channel < num_channels; channel++) { float invstd = 1. 
/ sqrt(var[channel] + 0.000001); for (i = 0; i < image_size; i++) { for (j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); // out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * // weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level + 1][0]; // printf("level %i: %i ==> %i\n", level, input_channels, output_channels); #pragma omp parallel for private(i) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level + 1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back // into the first memory block #if SPARSE_CONVOLUTIONS #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < output_channels; i++) { for (j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level]); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], 
batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level + 1]); reset_mem_block(block2); } /****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; // printf("%f\n", out[i]); if (relu == 1) { if (out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { struct timeval start, end; gettimeofday(&start, NULL); int i, j; for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } gettimeofday(&end, NULL); dense_time += get_seconds(start, end); } /****************************************************************************************************************************/ void write_out_block(int layer, float ***block) { int layer_name = layer; // * 2 - 1; char filename[16]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < 128; i++) { for (int j = 0; j < mem_block_shape[1]; j++) { for (int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) 
{ int layer_name = layer; // * 2 - 1; char filename[7]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL) { printf("Error opening file!\n"); exit(1); } for (int o = 0; o < cshape[layer][0]; o++) { for (int i = 0; i < cshape[layer][1]; i++) { for (int k_h = 0; k_h < cshape[layer][2]; k_h++) { for (int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[7]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL) { printf("Error opening file!\n"); exit(1); } for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for (int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_vars[layer][i]); } fclose(f); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c = 0; if (only_convolution == 1) { // for (i = 0; i < 512*7*7; i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum = -1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if (mem_block1_dense[i] > maximum) { maximum = mem_block2_dense[i]; c = i + 1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n", c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0; int 
i, j; // normal convolution #pragma omp parallel for private(i, j) schedule(dynamic, 1) \ num_threads(NUMBER_OF_THREADS) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 128, 128); reset_mem_block(block2); // depthwise convolutions for (level = 1; level < (CONV_LEVELS - 1); level = level + 2) { depthwise_convolution(block1, block2, wc[level], wc[level + 1], (level)); } // flatten flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } /****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? 
return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf( "Usage: <program.exe> <weights file> <images list file> <output file> " "<only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; // printf("%s\n", weights_file); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { pw_conv_time = 0.0; dense_time = 0.0; fgets(buf, 1024, file_list); if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); printf("pw_conv time: %.3lf sec\n", pw_conv_time); printf("dense time: %.3lf sec\n", dense_time); 
output_predictions(results, only_convolution, 1024, 1); } // free_memory(); fclose(file_list); return 0; }
/* ======== energy.h -- concatenated C++ header (Faunus energy classes) follows ======== */
#pragma once #include "core.h" #include "geometry.h" #include "space.h" #include "potentials.h" #include "multipole.h" #include "penalty.h" #include "mpi.h" #include <Eigen/Dense> #include <set> #ifdef ENABLE_POWERSASA #include <power_sasa.h> #endif namespace Faunus { namespace Energy { class Energybase { public: enum keys {OLD, NEW, NONE}; keys key=NONE; std::string name; std::string cite; virtual double energy(Change&)=0; //!< energy due to change inline virtual void to_json(json &j) const {}; //!< json output inline virtual void sync(Energybase*, Change&) {} inline virtual void init() {} //!< reset and initialize }; void to_json(json &j, const Energybase &base) { assert(!base.name.empty()); if (!base.cite.empty()) j[base.name]["reference"] = base.cite; base.to_json( j[base.name] ); } //!< Converts any energy class to json object /** * This holds Ewald setup and must *not* depend on particle type, nor depend on Space */ struct EwaldData { typedef std::complex<double> Tcomplex; Eigen::Matrix3Xd kVectors; // k-vectors, 3xK Eigen::VectorXd Aks; // 1xK, to minimize computational effort (Eq.24,DOI:10.1063/1.481216) Eigen::VectorXcd Qion, Qdip; // 1xK double alpha, rc, kc, check_k2_zero, lB; double const_inf, eps_surf; bool spherical_sum=true; bool ipbc=false; int kVectorsInUse=0; Point L; //!< Box dimensions void update(const Point &box) { L = box; int kcc = std::ceil(kc); check_k2_zero = 0.1*std::pow(2*pc::pi/L.maxCoeff(), 2); int kVectorsLength = (2*kcc+1) * (2*kcc+1) * (2*kcc+1) - 1; if (kVectorsLength == 0) { kVectors.resize(3,1); Aks.resize(1); kVectors.col(0) = Point(1,0,0); // Just so it is not the zero-vector Aks[0] = 0; kVectorsInUse = 1; Qion.resize(1); Qdip.resize(1); } else { double kc2 = kc*kc; kVectors.resize(3, kVectorsLength); Aks.resize(kVectorsLength); kVectorsInUse = 0; kVectors.setZero(); Aks.setZero(); int startValue = 1 - int(ipbc); for (int kx = 0; kx <= kcc; kx++) { double dkx2 = double(kx*kx); for (int ky = -kcc*startValue; ky <= kcc; ky++) { 
double dky2 = double(ky*ky); for (int kz = -kcc*startValue; kz <= kcc; kz++) { double factor = 1.0; if(kx > 0) factor *= 2; if(ky > 0 && ipbc) factor *= 2; if(kz > 0 && ipbc) factor *= 2; double dkz2 = double(kz*kz); Point kv = 2*pc::pi*Point(kx/L.x(),ky/L.y(),kz/L.z()); double k2 = kv.dot(kv); if (k2 < check_k2_zero) // Check if k2 != 0 continue; if (spherical_sum) if( (dkx2/kc2) + (dky2/kc2) + (dkz2/kc2) > 1) continue; kVectors.col(kVectorsInUse) = kv; Aks[kVectorsInUse] = factor*std::exp(-k2/(4*alpha*alpha))/k2; kVectorsInUse++; } } } Qion.resize(kVectorsInUse); Qdip.resize(kVectorsInUse); Aks.conservativeResize(kVectorsInUse); kVectors.conservativeResize(3,kVectorsInUse); } } }; void from_json(const json &j, EwaldData &d) { d.alpha = j.at("alpha"); d.rc = j.at("cutoff"); d.kc = j.at("kcutoff"); d.ipbc = j.value("ipbc", false); d.spherical_sum = j.value("spherical_sum", true); d.lB = pc::lB( j.at("epsr") ); d.eps_surf = j.value("epss", 0.0); d.const_inf = (d.eps_surf < 1) ? 0 : 1; // if unphysical (<1) use epsr infinity for surrounding medium } void to_json(json &j, const EwaldData &d) { j = {{"lB", d.lB}, {"ipbc", d.ipbc}, {"epss", d.eps_surf}, {"alpha", d.alpha}, {"cutoff", d.rc}, {"kcutoff", d.kc}, {"wavefunctions", d.kVectors.cols()}, {"spherical_sum", d.spherical_sum}}; } #ifdef DOCTEST_LIBRARY_INCLUDED TEST_CASE("[Faunus] Ewald - EwaldData") { using doctest::Approx; EwaldData data = R"({ "ipbc": false, "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0, "kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json; data.update( Point(10,10,10) ); CHECK(data.ipbc == false); CHECK(data.const_inf == 1); CHECK(data.alpha == 0.894427190999916); CHECK(data.kVectors.cols() == 2975); CHECK(data.Qion.size() == data.kVectors.cols()); data.ipbc=true; data.update( Point(10,10,10) ); CHECK(data.kVectors.cols() == 846); CHECK(data.Qion.size() == data.kVectors.cols()); } #endif /** @brief recipe or policies for ion-ion ewald */ template<class Tspace, bool 
eigenopt=false /** use Eigen matrix ops where possible */> struct PolicyIonIon { typedef typename Tspace::Tpvec::iterator iter; Tspace *spc; Tspace *old=nullptr; // set only if key==NEW at first call to `sync()` PolicyIonIon(Tspace &spc) : spc(&spc) {} void updateComplex(EwaldData &data) const { if (eigenopt) if (data.ipbc==false) { auto pos = asEigenMatrix(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::pos); // Nx3 auto charge = asEigenVector(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::charge); // Nx1 Eigen::MatrixXd kr = pos.matrix() * data.kVectors; // Nx3 * 3xK = NxK data.Qion.real() = (kr.array().cos().colwise()*charge).colwise().sum(); data.Qion.imag() = kr.array().sin().colwise().sum(); return; } for (int k=0; k<data.kVectors.cols(); k++) { const Point& kv = data.kVectors.col(k); EwaldData::Tcomplex Q(0,0); if (data.ipbc) for (auto &i : spc->p) Q += kv.cwiseProduct(i.pos).array().cos().prod() * i.charge; else for (auto &i : spc->p) { double dot = kv.dot(i.pos); Q += i.charge * EwaldData::Tcomplex( std::cos(dot), std::sin(dot) ); } data.Qion[k] = Q; } } //!< Update all k vectors void updateComplex(EwaldData &data, iter begin, iter end) const { assert(old!=nullptr); assert(spc->p.size() == old->p.size()); size_t ibeg = std::distance(spc->p.begin(), begin); // it->index size_t iend = std::distance(spc->p.begin(), end); // it->index for (int k=0; k<data.kVectors.cols(); k++) { auto& Q = data.Qion[k]; Point q = data.kVectors.col(k); if (data.ipbc) for (size_t i=ibeg; i<=iend; i++) { Q += q.cwiseProduct( spc->p[i].pos ).array().cos().prod() * spc->p[i].charge; Q -= q.cwiseProduct( old->p[i].pos ).array().cos().prod() * old->p[i].charge; } else for (size_t i=ibeg; i<=iend; i++) { double _new = q.dot(spc->p[i].pos); double _old = q.dot(old->p[i].pos); Q += spc->p[i].charge * EwaldData::Tcomplex( std::cos(_new), std::sin(_new) ); Q -= old->p[i].charge * EwaldData::Tcomplex( std::cos(_old), std::sin(_old) ); } } } //!< Optimized update of k subset. 
Require access to old positions through `old` pointer double selfEnergy(const EwaldData &d) { double E = 0; for (auto& i : spc->p) E += i.charge * i.charge; return -d.alpha*E / std::sqrt(pc::pi) * d.lB; } double surfaceEnergy(const EwaldData &d) { if (d.const_inf < 0.5) return 0; Point qr(0,0,0); for (auto &i : spc->p) qr += i.charge*i.pos; return d.const_inf * 2 * pc::pi / ( (2*d.eps_surf+1) * spc->geo.getVolume() ) * qr.dot(qr) * d.lB; } double reciprocalEnergy(const EwaldData &d) { double E = 0; if (eigenopt) // known at compile time E = d.Aks.cwiseProduct( d.Qion.cwiseAbs2() ).sum(); else for (int k=0; k<d.Qion.size(); k++) E += d.Aks[k] * std::norm( d.Qion[k] ); return 2 * pc::pi / spc->geo.getVolume() * E * d.lB; } }; #ifdef DOCTEST_LIBRARY_INCLUDED TEST_CASE("[Faunus] Ewald - IonIonPolicy") { using doctest::Approx; typedef Space<Geometry::Cuboid, Particle<Charge,Dipole>> Tspace; Tspace spc; spc.p.resize(2); spc.geo = R"( {"length": 10} )"_json; spc.p[0] = R"( {"pos": [0,0,0], "q": 1.0} )"_json; spc.p[1] = R"( {"pos": [1,0,0], "q": -1.0} )"_json; PolicyIonIon<Tspace> ionion(spc); EwaldData data = R"({ "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0, "kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json; data.ipbc = false; // PBC Ewald (http://dx.doi.org/10.1063/1.481216) data.update( spc.geo.getLength() ); ionion.updateComplex( data ); CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) ); CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) ); CHECK( ionion.reciprocalEnergy(data) == Approx(0.21303063979675319*data.lB) ); data.ipbc = true; // IPBC Ewald data.update( spc.geo.getLength() ); ionion.updateComplex( data ); CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) ); CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) ); CHECK( ionion.reciprocalEnergy(data) == Approx(0.0865107467*data.lB) ); } #endif /** @brief Ewald summation reciprocal energy */ template<class 
Tspace, class Policy=PolicyIonIon<Tspace>> class Ewald : public Energybase { private: EwaldData data; Policy policy; Tspace& spc; public: Ewald(const json &j, Tspace &spc) : policy(spc), spc(spc) { name = "ewald"; data = j; init(); } void init() override { data.update( spc.geo.getLength() ); policy.updateComplex(data); // brute force. todo: be selective } double energy(Change &change) override { double u=0; if (!change.empty()) { // If the state is NEW (trial state), then update all k-vectors if (key==NEW) { if (change.all || change.dV) { // everything changes data.update( spc.geo.getLength() ); policy.updateComplex(data); // update all (expensive!) } else { if (change.groups.size()==1) { // exactly one group is moved auto& d = change.groups[0]; auto& g = spc.groups[d.index]; if (d.atoms.size()==1) // exactly one atom is moved policy.updateComplex(data, g.begin()+d.atoms[0], g.begin()+d.atoms[0]); else policy.updateComplex(data, g.begin(), g.end()); } else policy.updateComplex(data); } } u = policy.selfEnergy(data) + policy.surfaceEnergy(data) + policy.reciprocalEnergy(data); } return u; } void sync(Energybase *basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); assert(other); if (other->key==OLD) policy.old = &(other->spc); // give NEW access to OLD space for optimized updates data = other->data; // copy everything! } //!< Called after a move is rejected/accepted as well as before simulation void to_json(json &j) const override { j = data; } }; template<typename Tspace> class Isobaric : public Energybase { private: Tspace& spc; double P; // P/kT public: Isobaric(const json &j, Tspace &spc) : spc(spc) { name = "isobaric"; cite = "Frenkel & Smith 2nd Ed (Eq. 
5.4.13)"; P = j.value("P/mM", 0.0) * 1.0_mM; if (P<1e-10) { P = j.value("P/Pa", 0.0) * 1.0_Pa; if (P<1e-10) P = j.at("P/atm").get<double>() * 1.0_atm; } } double energy(Change &change) override { if (change.dV || change.all) { double V = spc.geo.getVolume(); size_t N=0; for (auto &g : spc.groups) if (!g.empty()) { if (g.atomic) N += g.size(); else N++; } return P*V-(N+1)*std::log(V); } else return 0; } void to_json(json &j) const override { j["P/atm"] = P / 1.0_atm; j["P/mM"] = P / 1.0_mM; j["P/Pa"] = P / 1.0_Pa; _roundjson(j,5); } }; template<typename Tspace> class ExternalPotential : public Energybase { protected: typedef typename Tspace::Tpvec Tpvec; typedef typename Tspace::Tparticle Tparticle; bool COM=false; // apply on center-of-mass Tspace& spc; std::set<int> molids; // molecules to act upon std::function<double(const Tparticle&)> func=nullptr; // energy of single particle std::vector<std::string> _names; template<class Tparticle> double _energy(const Group<Tparticle> &g) const { double u=0; if (molids.find(g.id)!=molids.end()) { if (COM) { // apply only to center of mass Tparticle dummy; dummy.pos = g.cm; u = func(dummy); } else { for (auto &p : g) { u += func(p); if (std::isnan(u)) break; } } } return u; } //!< External potential on a single particle public: ExternalPotential(const json &j, Tspace &spc) : spc(spc) { name="external"; COM = j.value("com", false); _names = j.at("molecules").get<decltype(_names)>(); // molecule names auto _ids = names2ids(molecules<Tpvec>, _names); // names --> molids molids = std::set<int>(_ids.begin(), _ids.end()); // vector --> set if (molids.empty() || molids.size()!=_names.size() ) throw std::runtime_error(name + ": molecule list is empty"); } double energy(Change &change) override { assert(func!=nullptr); double u=0; if (change.dV || change.all) { for (auto &g : spc.groups) { // check all groups u += _energy(g); if (std::isnan(u)) break; } } else for (auto &d : change.groups) { auto &g = spc.groups.at(d.index); // check 
specified groups if (d.all || COM) // check all atoms in group u += _energy(g); else { // check only specified atoms in group if (molids.find(g.id)!=molids.end()) for (auto i : d.atoms) u += func( *(g.begin()+i) ); } if (std::isnan(u)) break; } return u; } void to_json(json &j) const override { j["molecules"] = _names; j["com"] = COM; } }; //!< Base class for external potentials, acting on particles template<typename Tspace, typename base=ExternalPotential<Tspace>> class Confine : public base { public: enum Variant {sphere, cylinder, cuboid, none}; Variant type=none; private: Point origo={0,0,0}, dir={1,1,1}; Point low, high; double radius, k; bool scale=false; std::map<std::string, Variant> m = { {"sphere", sphere}, {"cylinder", cylinder}, {"cuboid", cuboid} }; public: Confine(const json &j, Tspace &spc) : base(j,spc) { base::name = "confine"; k = value_inf(j, "k") * 1.0_kJmol; // get floating point; allow inf/-inf type = m.at( j.at("type") ); if (type==sphere || type==cylinder) { radius = j.at("radius"); origo = j.value("origo", origo); scale = j.value("scale", scale); if (type==cylinder) dir = {1,1,0}; base::func = [&radius=radius, origo=origo, k=k, dir=dir](const typename base::Tparticle &p) { double d2 = (origo-p.pos).cwiseProduct(dir).squaredNorm() - radius*radius; if (d2>0) return 0.5*k*d2; return 0.0; }; // If volume is scaled, also scale the confining radius by adding a trigger // to `Space::scaleVolume()` if (scale) spc.scaleVolumeTriggers.push_back( [&radius=radius](Tspace &spc, double Vold, double Vnew) { radius *= std::cbrt(Vnew/Vold); } ); } if (type==cuboid) { low = j.at("low").get<Point>(); high = j.at("high").get<Point>(); base::func = [low=low, high=high, k=k](const typename base::Tparticle &p) { double u=0; Point d = low-p.pos; for (int i=0; i<3; ++i) if (d[i]>0) u+=d[i]*d[i]; d = p.pos-high; for (int i=0; i<3; ++i) if (d[i]>0) u+=d[i]*d[i]; return 0.5*k*u; }; } } void to_json(json &j) const override { if (type==cuboid) j = {{"low", low}, 
{"high", high}}; if (type==sphere || type==cylinder) j = {{"radius", radius}}; if (type==sphere) { j["origo"] = origo; j["scale"] = scale; } for (auto &i : m) if (i.second==type) j["type"] = i.first; j["k"] = k/1.0_kJmol; base::to_json(j); _roundjson(j,5); } }; //!< Confine particles to a sub-region of the simulation container /* * The keys of the `intra` map are group index and the values * is a vector of `BondData`. For bonds between groups, fill * in `inter` which is evaluated for every update of call to * `energy`. * * @todo Optimize. */ template<typename Tspace> class Bonded : public Energybase { private: Tspace& spc; typedef typename Tspace::Tpvec Tpvec; typedef std::vector<std::shared_ptr<Potential::BondData2>> BondVector; BondVector inter; // inter-molecular bonds std::map<int,BondVector> intra; // intra-molecular bonds void update() { using namespace Potential; intra.clear(); for (size_t i=0; i<spc.groups.size(); i++) { if (!spc.groups.empty()) { auto &g = spc.groups[i]; for (auto &b : molecules<Tpvec>.at(g.id).bonds2) { intra[i].push_back( b->clone() ); // deep copy BondData from MoleculeData intra[i].back()->shift( std::distance(spc.p.begin(), g.begin()) ); Potential::setBondEnergyFunction( intra[i].back(), spc.p ); } } } } // finds and adds all intra-molecular bonds of active molecules double sum( const BondVector &v ) const { double u=0; for (auto &b : v) { assert(b->hasEnergyFunction()); u += b->energy(spc.geo.distanceFunc); } return u; } // sum energy in vector of BondData public: Bonded(const json &j, Tspace &spc) : spc(spc) { name = "bonded"; update(); if (j.is_object()) if (j.count("bondlist")==1) inter = j["bondlist"].get<BondVector>(); } void to_json(json &j) const override { if (!inter.empty()) j["bondlist"] = inter; if (!intra.empty()) { json& _j = j["bondlist-intramolecular"]; _j = json::array(); for (auto &i : intra) for (auto &b : i.second) _j.push_back(b); } } double energy(Change &c) override { double u=0; if ( !c.empty() ) { u = 
sum(inter); // energy of inter-molecular bonds if ( c.all || c.dV ) { for (auto& i : intra) // energy of intra-molecular bonds if (!spc.groups[i.first].empty()) // add only if group is active u += sum(i.second); } else for (auto &d : c.groups) if (d.internal) u += sum( intra[d.index] ); } return u; }; // brute force -- refine this! }; /** * @brief Nonbonded energy using a pair-potential */ template<typename Tspace, typename Tpairpot> class Nonbonded : public Energybase { private: double g2gcnt=0, g2gskip=0; protected: typedef typename Tspace::Tgroup Tgroup; double Rc2_g2g=pc::infty; void to_json(json &j) const override { j["pairpot"] = pairpot; j["cutoff_g2g"] = std::sqrt(Rc2_g2g); } template<typename T> inline bool cut(const T &g1, const T &g2) { g2gcnt++; if (g1.atomic || g2.atomic) return false; if ( spc.geo.sqdist(g1.cm, g2.cm)<Rc2_g2g ) return false; g2gskip++; return true; } //!< true if group<->group interaction can be skipped template<typename T> inline double i2i(const T &a, const T &b) { assert(&a!=&b && "a and b cannot be the same particle"); return pairpot(a, b, spc.geo.vdist(a.pos, b.pos)); } /* * Internal energy in group, calculating all with all or, if `index` * is given, only a subset. Index specifies the internal index (starting * at zero) of changed particles within the group. 
*/ double g_internal(const Tgroup &g, const std::vector<int> &index=std::vector<int>()) { using namespace ranges; double u=0; if (index.empty()) // assume that all atoms have changed for ( auto i = g.begin(); i != g.end(); ++i ) for ( auto j=i; ++j != g.end(); ) u += i2i(*i, *j); else { // only a subset have changed auto fixed = view::ints( 0, int(g.size()) ) | view::remove_if( [&index](int i){return std::binary_search(index.begin(), index.end(), i);}); for (int i : index) {// moved<->static for (int j : fixed ) { u += i2i( *(g.begin()+i), *(g.begin()+j)); } } for (int i : index) // moved<->moved for (int j : index) if (j>i) { u += i2i( *(g.begin()+i), *(g.begin()+j)); } } return u; } /* * Calculates the interaction energy of a particle, `i`, * and checks (1) if it is already part of Space, or (2) * external to space. */ double i2all(const typename Tspace::Tparticle &i) { double u=0; auto it = spc.findGroupContaining(i); // iterator to group if (it!=spc.groups.end()) { // check if i belongs to group in space for (auto &g : spc.groups) // i with all other particles if (&g!=&(*it)) // avoid self-interaction if (!cut(g, *it)) // check g2g cut-off for (auto &j : g) // loop over particles in other group u += i2i(i,j); for (auto &j : *it) // i with all particles in own group if (&j!=&i) u += i2i(i,j); } else // particle does not belong to any group for (auto &g : spc.groups) // i with all other *active* particles for (auto &j : g) // (this will include only active particles) u += i2i(i,j); return u; } /* * Group-to-group energy. A subset of `g1` can be given with `index` which refers * to the internal index (starting at zero) of the first group, `g1 * NOTE: the interpretation of this function is extended to also consider the mutual interactions * of a subset of each group and in such case returns sub1 <-> 2 and !sub1<->sub2, * hence excluding !sub1 <-> !sub2 in comparision to calling onconstrained g2g. In absence * of sub1 any sub2 is ignored. 
*/ virtual double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>(), const std::vector<int> &jndex=std::vector<int>()) { using namespace ranges; double u = 0; if (!cut(g1,g2)) { if ( index.empty() && jndex.empty() ) // if index is empty, assume all in g1 have changed for (auto &i : g1) for (auto &j : g2) { u += i2i(i,j); } else {// only a subset of g1 for (auto i : index) for (auto j=g2.begin(); j!=g2.end(); ++j) { u += i2i( *(g1.begin()+i), *j); } if ( !jndex.empty() ) { auto fixed = view::ints( 0, int(g1.size()) ) | view::remove_if( [&index](int i){return std::binary_search(index.begin(), index.end(), i);}); for (auto i : jndex) // moved2 <-| for (auto j : fixed) {// static1 <-| u += i2i( *(g2.begin()+i), *(g1.begin()+j)); } } } } return u; } public: Tspace& spc; //!< Space to operate on Tpairpot pairpot; //!< Pair potential Nonbonded(const json &j, Tspace &spc) : spc(spc) { name="nonbonded"; pairpot = j; Rc2_g2g = std::pow( j.value("cutoff_g2g", pc::infty), 2); } double energy(Change &change) override { using namespace ranges; double u=0; if (!change.empty()) { if (change.dV) { #pragma omp parallel for reduction (+:u) schedule (dynamic) for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) { for ( auto j=i; ++j != spc.groups.end(); ) u += g2g( *i, *j ); if (i->atomic) u += g_internal(*i); } return u; } // did everything change? if (change.all) { #pragma omp parallel for reduction (+:u) schedule (dynamic) for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) { for ( auto j=i; ++j != spc.groups.end(); ) u += g2g( *i, *j ); u += g_internal(*i); } // more todo here... 
return u; } // if exactly ONE molecule is changed if (change.groups.size()==1 && !change.dNpart) { auto& d = change.groups[0]; auto gindex = spc.groups.at(d.index).to_index(spc.p.begin()).first; if (d.atoms.size()==1) // exactly one atom has moved return i2all(spc.p.at(gindex+d.atoms[0])); auto& g1 = spc.groups.at(d.index); for (auto &g2 : spc.groups) if (&g1 != &g2) u += g2g(g1, g2, d.atoms); if (d.internal) u += g_internal(g1, d.atoms); return u; } // if (change.dNpart) { auto moved = change.touchedGroupIndex(); // index of moved groups std::vector<int> Moved; for (auto i: moved) Moved.push_back(i); std::sort( Moved.begin(), Moved.end() ); auto fixed = view::ints( 0, int(spc.groups.size()) ) | view::remove_if( [&Moved](int i){return std::binary_search(Moved.begin(), Moved.end(), i);} ); // index of static groups for ( auto cg1 = change.groups.begin(); cg1 < change.groups.end() ; ++cg1 ) { // Loop over all changed groups std::vector<int> ifiltered, jfiltered; for (auto i: cg1->atoms) { if ( i < spc.groups.at(cg1->index).size() ) ifiltered.push_back(i); } if ( !( cg1->dNpart && ifiltered.empty() ) ) // Skip if particles are removed for ( auto j : fixed) { u += g2g( spc.groups.at(cg1->index), spc.groups[j], ifiltered, jfiltered ); } for ( auto cg2 = cg1; ++cg2 != change.groups.end(); ) { for (auto i: cg2->atoms) if ( i < spc.groups.at(cg2->index).size() ) jfiltered.push_back(i); if ( !( (cg1->dNpart && ifiltered.empty()) && (cg2->dNpart && jfiltered.empty()) ) ) //Skip if particles are removed from both u += g2g( spc.groups.at(cg1->index), spc.groups.at(cg2->index), ifiltered, jfiltered ); jfiltered.clear(); } if ( ifiltered.size() != 0 ) u += g_internal( spc.groups.at( cg1->index ), ifiltered ); } return u; } auto moved = change.touchedGroupIndex(); // index of moved groups auto fixed = view::ints( 0, int(spc.groups.size()) ) | view::remove_if( [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);} ); // index of static groups // moved<->moved 
for ( auto i = moved.begin(); i != moved.end(); ++i ) { for ( auto j=i; ++j != moved.end(); ) u += g2g( spc.groups[*i], spc.groups[*j] ); } // moved<->static for ( auto i : moved) for ( auto j : fixed) u += g2g(spc.groups[i], spc.groups[j]); // more todo! } return u; } }; //!< Nonbonded, pair-wise additive energy term template<typename Tspace, typename Tpairpot> class NonbondedCached : public Nonbonded<Tspace,Tpairpot> { private: typedef Nonbonded<Tspace,Tpairpot> base; typedef typename Tspace::Tgroup Tgroup; Eigen::MatrixXf cache; Tspace &spc; double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>(), const std::vector<int> &jndex=std::vector<int>()) override { int i = &g1 - &base::spc.groups.front(); int j = &g2 - &base::spc.groups.front(); if (j<i) std::swap(i,j); if (base::key==Energybase::NEW) { // if this is from the trial system, double u = 0; if (!base::cut(g1,g2)) { for (auto &i : g1) for (auto &j : g2) u += base::i2i(i,j); } cache(i,j) = u; } return cache(i,j); // return (cached) value } public: NonbondedCached(const json &j, Tspace &spc) : base(j,spc), spc(spc) { base::name += "EM"; init(); } void init() override { cache.resize( spc.groups.size(), spc.groups.size() ); cache.setZero(); for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) { for ( auto j=i; ++j != base::spc.groups.end(); ) { int k = &(*i) - &base::spc.groups.front(); int l = &(*j) - &base::spc.groups.front(); if (l<k) std::swap(k,l); double u = 0; if (!base::cut(*i,*j)) { for (auto &k : *i) for (auto &l : *j) u += base::i2i(k,l); } cache(k,l) = u; } } } //!< Cache pair interactions in matrix double energy(Change &change) override { using namespace ranges; double u=0; if (!change.empty()) { if (change.all || change.dV) { #pragma omp parallel for reduction (+:u) schedule (dynamic) for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) { for ( auto j=i; ++j != base::spc.groups.end(); ) u += g2g( *i, *j ); } return 
u; } // if exactly ONE molecule is changed if (change.groups.size()==1) { auto& d = change.groups[0]; auto& g1 = base::spc.groups.at(d.index); for (auto &g2 : base::spc.groups) { if (&g1 != &g2) u += g2g(g1, g2, d.atoms); } return u; } auto moved = change.touchedGroupIndex(); // index of moved groups auto fixed = view::ints( 0, int(base::spc.groups.size()) ) | view::remove_if( [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);} ); // index of static groups // moved<->moved for ( auto i = moved.begin(); i != moved.end(); ++i ) for ( auto j=i; ++j != moved.end(); ) { u += g2g( base::spc.groups[*i], base::spc.groups[*j] ); } // moved<->static for ( auto i : moved) for ( auto j : fixed) u += g2g(base::spc.groups[i], base::spc.groups[j]); // more todo! } return u; } void sync(Energybase *basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); assert(other); if (change.all || change.dV) cache.triangularView<Eigen::StrictlyUpper>() = (other->cache).template triangularView<Eigen::StrictlyUpper>(); else for (auto &d : change.groups) { for (int i=0; i<d.index; i++) cache(i,d.index) = other->cache(i,d.index); for (size_t i=d.index+1; i<base::spc.groups.size(); i++) cache(d.index,i) = other->cache(d.index,i); } } //!< Copy energy matrix from other }; //!< Nonbonded with cached energies (Energy Matrix) /** * `udelta` is the total change of updating the energy function. If * not handled this will appear as an energy drift (which it is!). To * avoid this, this term is added to the energy but since it's the * same in both the trial and old state energies it will not affect * MC move acceptance. 
*/ template<typename Tspace> class Penalty : public Energybase { protected: typedef typename Tspace::Tparticle Tparticle; typedef typename Tspace::Tgroup Tgroup; typedef typename Tspace::Tpvec Tpvec; typedef typename std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> Tcoord; Tspace &spc; bool nodrift; bool quiet; size_t dim=0; size_t cnt=0; // number of calls to `sync()` size_t nupdate; // update frequency [steps] size_t samplings; size_t nconv=0; double udelta=0; // total energy change of updating penalty function double scale; // scaling factor for f0 double f0; // penalty increment std::string file, hisfile; std::vector<Tcoord> rcvec; // vector of reaction coordinate functions std::vector<double> coord; // latest reaction coordinate Table<int> histo; Table<double> penalty; public: Penalty(const json &j, Tspace &spc) : spc(spc) { using namespace ReactionCoordinate; name = "penalty"; f0 = j.value("f0", 0.5); scale = j.value("scale", 0.8); quiet = j.value("quiet", true); nupdate = j.value("update", 0); samplings = j.value("samplings", 1); nodrift = j.value("nodrift", true); file = j.at("file").get<std::string>(); hisfile = j.value("histogram", "penalty-histogram.dat"); std::vector<double> binwidth, min, max; if (scale<0 || scale>1) throw std::runtime_error("`scale` must be in the interval [0:1]"); for (auto &i : j.at("coords")) if (i.is_object()) if (i.size()==1) { std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> rc=nullptr; for (auto it=i.begin(); it!=i.end(); ++it) { if (it.key()=="atom") rc = std::make_shared<AtomProperty>(it.value(), spc); if (it.key()=="system") rc = std::make_shared<SystemProperty>(it.value(), spc); if (it.key()=="cmcm") rc = std::make_shared<MassCenterSeparation>(it.value(), spc); if (it.key()=="angle") rc = std::make_shared<PrincipalAxisAngle>(it.value(), spc); if (rc!=nullptr) { if (rc->min>=rc->max || rc->binwidth<=0) throw std::runtime_error("min<max and binwidth>0 required for '" + it.key() + "'"); 
rcvec.push_back(rc); binwidth.push_back( rc->binwidth ); min.push_back( rc->min ); max.push_back( rc->max ); } else throw std::runtime_error("unknown coordinate type '" + it.key() + "'"); } } dim = binwidth.size(); if (dim<1 || dim>2) throw std::runtime_error("minimum one maximum two coordinates required"); coord.resize(2,0); histo.reInitializer(binwidth, min, max); penalty.reInitializer(binwidth, min, max); std::ifstream f(MPI::prefix+file); if (f) { cout << "Loading penalty function '" << MPI::prefix+file << "'" << endl; std::string hash; f >> hash >> f0 >> samplings; for (int row=0; row<penalty.rows(); row++) for (int col=0; col<penalty.cols(); col++) if (!f.eof()) f >> penalty(row,col); else throw std::runtime_error("penalty file dimension mismatch"); } } virtual ~Penalty() { std::ofstream f1(MPI::prefix + file), f2(MPI::prefix + hisfile); if (f1) f1 << "# " << f0 << " " << samplings << "\n" << penalty.array() - penalty.minCoeff() << endl; if (f2) f2 << histo << endl; // add function to save to numpy-friendly file... } void to_json(json &j) const override { j["file"] = file; j["scale"] = scale; j["update"] = nupdate; j["nodrift"] = nodrift; j["histogram"] = hisfile; j["f0_final"] = f0; auto& _j = j["coords"] = json::array(); for (auto rc : rcvec) { json t; t[rc->name] = *rc; _j.push_back(t); } } double energy(Change &change) override { assert(rcvec.size()<=coord.size()); double u=0; coord.resize( rcvec.size() ); if (!change.empty()) { for (size_t i=0; i<rcvec.size(); i++) { coord.at(i) = rcvec[i]->operator()(); if (!rcvec[i]->inRange(coord[i])) return pc::infty; } penalty.to_index(coord); u = penalty[coord]; } return (nodrift) ? u - udelta : u; } virtual void update(const std::vector<double> &c) { if (++cnt % nupdate == 0 && f0>0) { bool b = histo.minCoeff() >= (int)samplings; if (b && f0>0) { double min = penalty.minCoeff(); penalty = penalty.array() - min; if (!quiet) cout << "Barriers/kT. 
Penalty=" << penalty.maxCoeff() << " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff()) << endl; f0 = f0 * scale; // reduce penalty energy samplings = std::ceil( samplings / scale ); histo.setZero(); udelta += -min; } } coord = c; histo[coord]++; penalty[coord] += f0; udelta += f0; } void sync(Energybase *basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); assert(other); update(other->coord); other->update(other->coord); } // @todo: this double the MPI communication }; #ifdef ENABLE_MPI template<typename Tspace, typename Base=Penalty<Tspace>> struct PenaltyMPI : public Base { using Base::samplings; using Base::penalty; using Base::udelta; using Base::scale; using Base::histo; using Base::coord; using Base::cnt; using Base::f0; using Base::file; using Base::hisfile; using Base::nconv; Eigen::VectorXi weights; // array w. mininum histogram counts Eigen::VectorXd buffer; // receive buffer for penalty functions PenaltyMPI(const json &j, Tspace &spc) : Base(j,spc) { weights.resize( MPI::mpi.nproc() ); buffer.resize( penalty.size()*MPI::mpi.nproc() ); } void update(const std::vector<double> &c) override { using namespace Faunus::MPI; double uold = penalty[c]; if (++cnt % this->nupdate == 0 && f0>0) { int min = histo.minCoeff(); MPI_Barrier(mpi.comm); MPI_Allgather(&min, 1, MPI_INT, weights.data(), 1, MPI_INT, mpi.comm); if ( weights.maxCoeff() > samplings ) { MPI_Gather(penalty.data(), penalty.size(), MPI_DOUBLE, buffer.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm); if (mpi.isMaster()) { penalty.setZero(); for (int i=0; i<mpi.nproc(); i++) penalty += Eigen::Map<Eigen::MatrixXd>( buffer.data()+i*penalty.size(), penalty.rows(), penalty.cols() ); penalty = ( penalty.array() - penalty.minCoeff() ) / double(mpi.nproc()); } MPI_Bcast(penalty.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm); nconv += 1; std::ofstream f3(MPI::prefix + std::to_string(nconv) + file); if (f3) f3 << "# " << f0 << " " << samplings << "\n" << 
penalty.array() << endl; std::ofstream f4(MPI::prefix + std::to_string(nconv) + hisfile); if (f4) f4 << histo << endl; if (min>0 && !this->quiet) cout << "Barriers/kT. Penalty=" << penalty.maxCoeff() << " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff()) << endl; histo.setZero(); f0 = f0 * scale; // reduce penalty energy samplings = std::ceil( samplings / scale ); } } coord = c; histo[coord]++; penalty[coord] += f0; udelta += penalty[coord] - uold; } //!< Average penalty function across all nodes }; //!< Penalty function with MPI exchange #endif #ifdef ENABLE_POWERSASA /* * @todo: * - can only a subset of sasa be calculated? Note that it's the * `update_coord()` function that takes up most time. * - delegate to GPU? In the PowerSasa paper this is mentioned */ template<class Tspace> class SASAEnergy : public Energybase { public: std::vector<float> sasa, radii; private: typedef typename Tspace::Tparticle Tparticle; typedef typename Tspace::Tpvec Tpvec; Tspace& spc; double probe; // sasa probe radius (angstrom) double conc=0;// co-solute concentration (mol/l) Average<double> avgArea; // average surface area std::shared_ptr<POWERSASA::PowerSasa<float,Point>> ps=nullptr; void updateSASA(const Tpvec &p) { radii.resize(p.size()); std::transform(p.begin(), p.end(), radii.begin(), [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;}); ps->update_coords(spc.positions(), radii); // slowest step! for (size_t i=0; i<p.size(); i++) { auto &a = atoms<Tparticle>[p[i].id]; if (std::fabs(a.tfe)>1e-9 || std::fabs(a.tension)>1e-9) ps->calc_sasa_single(i); } sasa = ps->getSasa(); assert(sasa.size()==p.size()); } void to_json(json &j) const override { using namespace u8; j["molarity"] = conc / 1.0_molar; j["radius"] = probe / 1.0_angstrom; j[bracket("SASA")+"/"+angstrom+squared] = avgArea.avg() / 1.0_angstrom; _roundjson(j,5); } /* * @note * This is not enough as the PowerSasa object contains data * that also need syncing. 
It works due to the `update` (expensive!) * call in `energy`. */ void sync(Energybase *basePtr, Change &c) override { auto other = dynamic_cast<decltype(this)>(basePtr); if (other) { if (c.all || c.dV) { radii = other->radii; sasa = other->sasa; } else { for (auto &d : c.groups) { int offset = std::distance(spc.p.begin(), spc.groups.at(d.index).begin()); for (int j : d.atoms) { int i = j + offset; radii[i] = other->radii[i]; sasa[i] = other->sasa[i]; } } } } } public: SASAEnergy(const json &j, Tspace &spc) : spc(spc) { name = "sasa"; cite = "doi:10.1002/jcc.21844"; probe = j.value("radius", 1.4) * 1.0_angstrom; conc = j.value("molarity", conc) * 1.0_molar; init(); } void init() override { radii.resize( spc.p.size() ); std::transform( spc.p.begin(), spc.p.end(), radii.begin(), [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;} ); if (ps==nullptr) ps = std::make_shared<POWERSASA::PowerSasa<float,Point>>(spc.positions(),radii); updateSASA(spc.p); } double energy(Change &change) override { double u=0, A=0; /* * ideally we want to call `update` only is `key==NEW` but * syncronising the PowerSasa object is difficult since it's * non-copyable. */ updateSASA(spc.p); // ideally we want for (size_t i=0; i<spc.p.size(); ++i) { auto &a = atoms<Tparticle>[ spc.p[i].id ]; u += sasa[i] * (a.tension + conc * a.tfe); A += sasa[i]; } avgArea+=A; // sample average area for accepted confs. 
only return u; } }; //!< SASA energy from transfer free energies #endif struct Example2D : public Energybase { Point& i; // reference to 1st particle in the system template<typename Tspace> Example2D(const json &j, Tspace &spc): i(spc.p.at(0).pos) { name = "Example2D"; } double energy(Change &change) override { double s=1+std::sin(2*pc::pi*i.x())+std::cos(2*pc::pi*i.y()); if (i.x()>=-2.00 && i.x()<=-1.25) return 1*s; if (i.x()>=-1.25 && i.x()<=-0.25) return 2*s; if (i.x()>=-0.25 && i.x()<= 0.75) return 3*s; if (i.x()>= 0.75 && i.x()<= 1.75) return 4*s; if (i.x()>= 1.75 && i.x()<= 2.00) return 5*s; return 1e10; } }; template<typename Tspace> class Hamiltonian : public Energybase, public BasePointerVector<Energybase> { protected: typedef typename Tspace::Tparticle Tparticle; void to_json(json &j) const override { for (auto i : this->vec) j.push_back(*i); } void addEwald(const json &j, Tspace &spc) { if (j.count("coulomb")==1) if (j["coulomb"].at("type")=="ewald") push_back<Energy::Ewald<Tspace>>(j["coulomb"], spc); } //!< Adds an instance of reciprocal space Ewald energies (if appropriate) public: Hamiltonian(Tspace &spc, const json &j) { using namespace Potential; typedef CombinedPairPotential<CoulombGalore,LennardJones<Tparticle>> CoulombLJ; typedef CombinedPairPotential<CoulombGalore,HardSphere<Tparticle>> CoulombHS; typedef CombinedPairPotential<CoulombGalore,WeeksChandlerAndersen<Tparticle>> CoulombWCA; typedef CombinedPairPotential<Coulomb,WeeksChandlerAndersen<Tparticle>> PrimitiveModelWCA; Energybase::name="hamiltonian"; for (auto &m : j.at("energy")) {// loop over move list size_t oldsize = vec.size(); for (auto it=m.begin(); it!=m.end(); ++it) { try { if (it.key()=="nonbonded_coulomblj") push_back<Energy::Nonbonded<Tspace,CoulombLJ>>(it.value(), spc); if (it.key()=="nonbonded") push_back<Energy::Nonbonded<Tspace,FunctorPotential<typename Tspace::Tparticle>>>(it.value(), spc); if (it.key()=="nonbonded_coulombhs") 
push_back<Energy::Nonbonded<Tspace,CoulombHS>>(it.value(), spc); if (it.key()=="nonbonded_coulombwca") push_back<Energy::Nonbonded<Tspace,CoulombWCA>>(it.value(), spc); if (it.key()=="nonbonded_pmwca") push_back<Energy::Nonbonded<Tspace,PrimitiveModelWCA>>(it.value(), spc); if (it.key()=="nonbonded_deserno") push_back<Energy::NonbondedCached<Tspace,DesernoMembrane<typename Tspace::Tparticle>>>(it.value(), spc); if (it.key()=="nonbonded_desernoAA") push_back<Energy::NonbondedCached<Tspace,DesernoMembraneAA<typename Tspace::Tparticle>>>(it.value(), spc); if (it.key()=="bonded") push_back<Energy::Bonded<Tspace>>(it.value(), spc); if (it.key()=="confine") push_back<Energy::Confine<Tspace>>(it.value(), spc); if (it.key()=="example2d") push_back<Energy::Example2D>(it.value(), spc); if (it.key()=="isobaric") push_back<Energy::Isobaric<Tspace>>(it.value(), spc); if (it.key()=="penalty") #ifdef ENABLE_MPI push_back<Energy::PenaltyMPI<Tspace>>(it.value(), spc); #else push_back<Energy::Penalty<Tspace>>(it.value(), spc); #endif #ifdef ENABLE_POWERSASA if (it.key()=="sasa") push_back<Energy::SASAEnergy<Tspace>>(it.value(), spc); #endif // additional energies go here... 
addEwald(it.value(), spc); // add reciprocal Ewald terms if appropriate if (vec.size()==oldsize) std::cerr << "warning: ignoring unknown energy '" << it.key() << "'" << endl; } catch (std::exception &e) { throw std::runtime_error("Error adding energy '" + it.key() + "': " + e.what()); } } } } double energy(Change &change) override { double du=0; for (auto i : this->vec) { i->key=key; du += i->energy(change); } return du; } //!< Energy due to changes void init() override { for (auto i : this->vec) i->init(); } void sync(Energybase* basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); if (other) if (other->size()==size()) { for (size_t i=0; i<size(); i++) this->vec[i]->sync( other->vec[i].get(), change ); return; } throw std::runtime_error("hamiltonian mismatch"); } }; //!< Aggregates and sum energy terms }//namespace }//namespace
SpatialConvolutionLocal.c
/* Generic (per-dtype) CPU kernels for SpatialConvolutionLocal, Torch/THNN's
 * 2-D locally-connected layer: like a convolution, but with an independent
 * weight matrix (and bias value) for every output pixel.  The file is
 * included once per scalar type via the TH_GENERIC_FILE mechanism, so
 * `real`, `accreal`, THTensor_(name) and THNN_(name) expand to
 * type-specific symbols.
 *
 * All three passes view the weight as a batch of oH*oW small matrices
 * (oH*oW x nOutputPlane x nInputPlane*kH*kW — see the baddbmm comments
 * below) and reduce the spatial computation to batched matrix products over
 * an unfolded (im2col) copy of the input held in `finput`.
 */
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionLocal.c"
#else

/* Validate kernel/stride hyper-parameters and the shapes of input, bias and
 * (optionally) gradOutput.  Note the H-before-W argument order here, unlike
 * the public entry points below.  nInputPlane/nOutputPlane are derived from
 * the 3D weight view: size[1] = nOutputPlane, size[2] = nInputPlane*kH*kW. */
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
        THTensor *input, THTensor *gradOutput,
        THTensor *weight, THTensor *bias,
        int kH, int kW, int dH, int dW, int padH, int padW,
        int64_t inputHeight, int64_t inputWidth,
        int64_t outputHeight, int64_t outputWidth)
{
  THArgCheck(kW > 0 && kH > 0, 9,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);

  /* Dimension indices of (plane, height, width); shifted by one when the
   * input is a batched 4D tensor. */
  int ndim = input->nDimension;
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input,
                "3D or 4D input tensor expected but got: %s");

  int64_t nInputPlane = weight->size[2] / (kH * kW);
  int64_t nOutputPlane = weight->size[1];

  /* Bias is per output location: nOutputPlane x oH x oW. */
  if (bias != NULL) {
    THNN_CHECK_DIM_SIZE(bias, 3, 0, nOutputPlane);
    THNN_CHECK_DIM_SIZE(bias, 3, 1, outputHeight);
    THNN_CHECK_DIM_SIZE(bias, 3, 2, outputWidth);
  }

  THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

/* Return a new, contiguous 3D view of the weight tensor.  A 6D weight
 * (presumably oH x oW x nOutputPlane x nInputPlane x kH x kW, consistent
 * with the 3D layout documented at the baddbmm calls) is collapsed to
 * (oH*oW) x nOutputPlane x (nInputPlane*kH*kW); a 3D weight is only made
 * contiguous.  The caller owns the returned tensor and must free it.
 * Strides passed as -1 are filled in by TH (contiguous layout). */
static THTensor* THNN_(view_weight_local)(THTensor *_weight)
{
  THTensor *weight = THTensor_(newContiguous)(_weight);
  THArgCheck(weight->nDimension == 3 || weight->nDimension == 6, 4,
             "weight tensor should be 3D or 6D - got %dD", weight->nDimension);
  if (weight->nDimension == 6) {
    int64_t s1 = weight->size[0] * weight->size[1];
    int64_t s2 = weight->size[2];
    int64_t s3 = weight->size[3] * weight->size[4] * weight->size[5];
    THTensor *old_weight = weight;
    weight = THTensor_(newWithStorage3d)(weight->storage,
                                         weight->storageOffset,
                                         s1, -1, s2, -1, s3, -1);
    THTensor_(free)(old_weight);
  }
  return weight;
}

/* Forward pass for a single (unbatched) sample:
 *   1. im2col the input into finput,
 *   2. seed output with the per-pixel bias,
 *   3. one matrix product per output pixel via batched baddbmm. */
static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
     (
      THTensor *input, THTensor *output,
      THTensor *weight, THTensor *bias, THTensor *finput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *output3d, *finput3d;

  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
                       nInputPlane, inputWidth, inputHeight,
                       outputWidth, outputHeight);

  /* Output starts as the bias; the matmul below accumulates onto it. */
  THTensor_(copy)(output, bias);

  /* Batched views (one batch entry per output pixel) over output/finput;
   * sizes and strides are given pairwise to newWithStorage3d. */
  output3d = THTensor_(newWithStorage3d)
    (output->storage, output->storageOffset,
     outputHeight * outputWidth, 1,
     nOutputPlane, outputHeight * outputWidth,
     1, nOutputPlane * outputHeight * outputWidth);

  finput3d = THTensor_(newWithStorage3d)
    (finput->storage, finput->storageOffset,
     outputHeight * outputWidth, 1,
     kW * kH * nInputPlane, outputHeight * outputWidth,
     1, kW * kH * nInputPlane * outputHeight * outputWidth);

  // weight:    oH*oW x nOutputPlane x nInputPlane*kH*kW
  // finput3d:  oH*oW x nInputPlane*kH*kW x 1
  THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
  // output3d:  oH*oW x nOutputPlane x 1

  THTensor_(free)(output3d);
  THTensor_(free)(finput3d);
}

/* Public forward entry point.  Accepts a 3D (C x H x W) or 4D (T x C x H x W)
 * input; the batched case iterates over samples with OpenMP, each iteration
 * selecting its own slice of input/output/finput.  `state` and `fgradInput`
 * are unused here (kept for THNN API symmetry). */
void THNN_(SpatialConvolutionLocal_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THTensor *weight,
    THTensor *bias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  /* Local contiguous 3D view of the weight; freed at the end. */
  weight = THNN_(view_weight_local)(weight);

  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);

  input = THTensor_(newContiguous)(input);

  int64_t nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH);
  int64_t nOutputPlane = THTensor_(size)(weight, 1);

  if(input->nDimension == 3)
  {
    /* Single sample. */
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);

    THNN_(SpatialConvolutionLocal_updateOutput_frame)
      (input, output, weight, bias, finput,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    /* Batch of T samples. */
    int64_t T = input->size[0];
    int64_t t;

    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      /* Per-iteration slice views; freed before the iteration ends. */
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(SpatialConvolutionLocal_updateOutput_frame)
        (input_t, output_t, weight, bias, finput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);   /* drop the contiguous copy */
  THTensor_(free)(weight);  /* drop the 3D weight view */
}

/* Backward-by-input for a single sample: per-pixel matrix product of the
 * (already transposed) weight with gradOutput, written into fgradInput
 * (beta = 0.0, so previous contents are overwritten), then col2im-style
 * accumulation into gradInput via unfolded_acc. */
static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
     (THTensor *gradInput, THTensor *gradOutput,
      THTensor *weight, THTensor *fgradInput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *fgradInput3d;
  gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             nOutputPlane, outputHeight*outputWidth,
                                             1, nOutputPlane*outputHeight*outputWidth);
  fgradInput3d = THTensor_(newWithStorage3d)(fgradInput->storage, fgradInput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             kW*kH*nInputPlane, outputHeight*outputWidth,
                                             1, kW*kH*nInputPlane*outputHeight*outputWidth);
  // weight:        oH*oW x nInputPlane*kH*kW x nOutputPlane
  // gradOutput3d:  oH*oW x nOutputPlane x 1
  THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
  // fgradInput3d:  oH*oW x nInputPlane*kH*kW x 1

  THTensor_(free)(gradOutput3d);
  THTensor_(free)(fgradInput3d);

  /* Scatter-add the unfolded gradient back to image layout. */
  THTensor_(zero)(gradInput);
  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH,
                      padW, padH,
                      nInputPlane, inputWidth, inputHeight,
                      outputWidth, outputHeight);
}

/* Public backward-by-input entry point.  Builds one transposed view of the
 * 3D weight (dims 1 and 2 swapped) that is shared, read-only, by all batch
 * iterations; the batch loop is OpenMP-parallel as in the forward pass. */
void THNN_(SpatialConvolutionLocal_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THTensor *weight,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  weight = THNN_(view_weight_local)(weight);

  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  int64_t nInputPlane = THTensor_(size)(weight,2)/(kW*kH);
  int64_t nOutputPlane = THTensor_(size)(weight,1);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);

  /* tweight: oH*oW x nInputPlane*kH*kW x nOutputPlane. */
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 1, 2);

  if(input->nDimension == 3)
  {
    THNN_(SpatialConvolutionLocal_updateGradInput_frame)
      (gradInput, gradOutput, tweight,
       fgradInput, kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size[0];
    int64_t t;

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(SpatialConvolutionLocal_updateGradInput_frame)
        (gradInput_t, gradOutput_t, tweight, fgradInput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  THTensor_(free)(tweight);
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

/* Parameter-gradient accumulation for a single sample:
 *   gradWeight += scale * gradOutput3d x finput3d   (per output pixel)
 *   gradBias   += scale * gradOutput
 * finput3d views each unfolded column as a 1 x (kW*kH*nInputPlane) row
 * vector, so baddbmm yields one outer product per output pixel. */
static void
THNN_(SpatialConvolutionLocal_accGradParameters_frame)
     (THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
      THTensor *finput, real scale,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *finput3d;

  gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             nOutputPlane, outputHeight*outputWidth,
                                             1, nOutputPlane*outputHeight*outputWidth);
  finput3d = THTensor_(newWithStorage3d)(finput->storage, finput->storageOffset,
                                         outputHeight*outputWidth, 1,
                                         1, kW*kH*nInputPlane*outputHeight*outputWidth,
                                         kW*kH*nInputPlane, outputHeight*outputWidth);
  // gradOutput3d:  oH*oW x nOutputPlane x 1
  // finput3d:      oH*oW x 1 x kW*kH*nInputPlane
  THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
  // gradWeight:    oH*oW x nOutputPlane x kW*kH*nInputPlane

  THTensor_(cadd)(gradBias, gradBias, scale, gradOutput);

  THTensor_(free)(gradOutput3d);
  THTensor_(free)(finput3d);
}

/* Public parameter-gradient entry point.  Unlike the forward/gradInput
 * passes, the batch loop carries no OpenMP pragma — presumably because each
 * iteration accumulates into the shared gradWeight/gradBias tensors, which
 * would race if parallelized.  `fgradInput` is unused here. */
void THNN_(SpatialConvolutionLocal_accGradParameters)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradWeight,
    THTensor *gradBias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight,
    accreal scale_)
{
  THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  gradWeight = THNN_(view_weight_local)(gradWeight);

  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  int64_t nInputPlane = THTensor_(size)(gradWeight,2)/(kW*kH);
  int64_t nOutputPlane = THTensor_(size)(gradWeight,1);

  if(input->nDimension == 3)
  {
    /* Single sample. */
    THNN_(SpatialConvolutionLocal_accGradParameters_frame)
      (gradOutput, gradWeight, gradBias, finput, scale,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    /* Batch of T samples, accumulated serially. */
    int64_t T = input->size[0];
    int64_t t;

    for(t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(SpatialConvolutionLocal_accGradParameters_frame)
        (gradOutput_t, gradWeight, gradBias, finput_t, scale,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(gradWeight);
}

#endif
_kdtree_core.c
/* pykdtree, Fast kd-tree implementation with OpenMP-enabled queries Copyright (C) 2013 - present Esben S. Nielsen This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* This kd-tree implementation is based on the scipy.spatial.cKDTree by Anne M. Archibald and libANN by David M. Mount and Sunil Arya. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <float.h> #define PA(i,d) (pa[no_dims * pidx[i] + d]) #define PASWAP(a,b) { uint32_t tmp = pidx[a]; pidx[a] = pidx[b]; pidx[b] = tmp; } #ifdef _MSC_VER #define restrict __restrict #endif typedef struct { float cut_val; int8_t cut_dim; uint32_t start_idx; uint32_t n; float cut_bounds_lv; float cut_bounds_hv; struct Node_float *left_child; struct Node_float *right_child; } Node_float; typedef struct { float *bbox; int8_t no_dims; uint32_t *pidx; struct Node_float *root; } Tree_float; typedef struct { double cut_val; int8_t cut_dim; uint32_t start_idx; uint32_t n; double cut_bounds_lv; double cut_bounds_hv; struct Node_double *left_child; struct Node_double *right_child; } Node_double; typedef struct { double *bbox; int8_t no_dims; uint32_t *pidx; struct Node_double *root; } Tree_double; void insert_point_float(uint32_t *closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k); void get_bounding_box_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, float *bbox); int partition_float(float *pa, uint32_t 
*pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t *cut_dim, float *cut_val, uint32_t *n_lo); Tree_float* construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp); Node_float* construct_subtree_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox); Node_float * create_node_float(uint32_t start_idx, uint32_t n, int is_leaf); void delete_subtree_float(Node_float *root); void delete_tree_float(Tree_float *tree); void print_tree_float(Node_float *root, int level); float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims); float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox); float get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox); void search_leaf_float(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint32_t *restrict closest_idx, float *restrict closest_dist); void search_leaf_float_mask(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint8_t *restrict mask, uint32_t *restrict closest_idx, float *restrict closest_dist); void search_splitnode_float(Node_float *root, float *pa, uint32_t *pidx, int8_t no_dims, float *point_coord, float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t *mask, uint32_t * closest_idx, float *closest_dist); void search_tree_float(Tree_float *tree, float *pa, float *point_coords, uint32_t num_points, uint32_t k, float distance_upper_bound, float eps, uint8_t *mask, uint32_t *closest_idxs, float *closest_dists); void insert_point_double(uint32_t *closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k); void get_bounding_box_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, double *bbox); int partition_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t 
start_idx, uint32_t n, double *bbox, int8_t *cut_dim, double *cut_val, uint32_t *n_lo); Tree_double* construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp); Node_double* construct_subtree_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox); Node_double * create_node_double(uint32_t start_idx, uint32_t n, int is_leaf); void delete_subtree_double(Node_double *root); void delete_tree_double(Tree_double *tree); void print_tree_double(Node_double *root, int level); double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims); double get_cube_offset_double(int8_t dim, double *point_coord, double *bbox); double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox); void search_leaf_double(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint32_t *restrict closest_idx, double *restrict closest_dist); void search_leaf_double_mask(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord, uint32_t k, uint8_t *restrict mask, uint32_t *restrict closest_idx, double *restrict closest_dist); void search_splitnode_double(Node_double *root, double *pa, uint32_t *pidx, int8_t no_dims, double *point_coord, double min_dist, uint32_t k, double distance_upper_bound, double eps_fac, uint8_t *mask, uint32_t * closest_idx, double *closest_dist); void search_tree_double(Tree_double *tree, double *pa, double *point_coords, uint32_t num_points, uint32_t k, double distance_upper_bound, double eps, uint8_t *mask, uint32_t *closest_idxs, double *closest_dists); /************************************************ Insert point into priority queue Params: closest_idx : index queue closest_dist : distance queue pidx : permutation index of data points cur_dist : distance to point inserted k : number of neighbours 
************/
/* Insert (pidx, cur_dist) into the k-nearest queues, kept sorted by
   ascending distance.  Precondition: cur_dist < closest_dist[k-1]. */
void insert_point_float(uint32_t *closest_idx, float *closest_dist, uint32_t pidx, float cur_dist, uint32_t k)
{
    int i;
    /* Shift worse entries one slot to the right... */
    for (i = k - 1; i > 0; i--)
    {
        if (closest_dist[i - 1] > cur_dist)
        {
            closest_dist[i] = closest_dist[i - 1];
            closest_idx[i] = closest_idx[i - 1];
        }
        else
        {
            break;
        }
    }
    /* ...and drop the new point into the freed slot. */
    closest_idx[i] = pidx;
    closest_dist[i] = cur_dist;
}

/************************************************
Get the bounding box of a set of points
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims: number of dimensions
    n : number of points
    bbox : bounding box (return, layout [min_0, max_0, min_1, max_1, ...])
************************************************/
void get_bounding_box_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, float *bbox)
{
    float cur;
    int8_t i, j;
    uint32_t bbox_idx, i2;

    /* Use first data point to initialize */
    for (i = 0; i < no_dims; i++)
    {
        bbox[2 * i] = bbox[2 * i + 1] = PA(0, i);
    }

    /* Update using rest of data points */
    for (i2 = 1; i2 < n; i2++)
    {
        for (j = 0; j < no_dims; j++)
        {
            bbox_idx = 2 * j;
            cur = PA(i2, j);
            if (cur < bbox[bbox_idx])
            {
                bbox[bbox_idx] = cur;
            }
            else if (cur > bbox[bbox_idx + 1])
            {
                bbox[bbox_idx + 1] = cur;
            }
        }
    }
}

/************************************************
Partition a range of data points by manipulation
the permutation index. The sliding midpoint rule is
used for the partitioning.
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims: number of dimensions
    start_idx : index of first data point to use
    n : number of data points
    bbox : bounding box of data points
    cut_dim : dimension used for partition (return)
    cut_val : value of cutting point (return)
    n_lo : number of point below cutting plane (return)
Returns 0 on success, 1 when the box has zero extent (no split possible).
************************************************/
int partition_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *bbox, int8_t *cut_dim, float *cut_val, uint32_t *n_lo)
{
    int8_t dim = 0, i;
    uint32_t p, q, i2;
    float size = 0, min_val, max_val, split, side_len, cur_val;
    uint32_t end_idx = start_idx + n - 1;

    /* Find largest bounding box side */
    for (i = 0; i < no_dims; i++)
    {
        side_len = bbox[2 * i + 1] - bbox[2 * i];
        if (side_len > size)
        {
            dim = i;
            size = side_len;
        }
    }

    min_val = bbox[2 * dim];
    max_val = bbox[2 * dim + 1];

    /* Check for zero length or inconsistent */
    if (min_val >= max_val)
        return 1;

    /* Use middle for splitting */
    split = (min_val + max_val) / 2;

    /* Partition all data points around middle (Hoare-style swap scan) */
    p = start_idx;
    q = end_idx;
    while (p <= q)
    {
        if (PA(p, dim) < split)
        {
            p++;
        }
        else if (PA(q, dim) >= split)
        {
            /* Guard for underflow */
            if (q > 0)
            {
                q--;
            }
            else
            {
                break;
            }
        }
        else
        {
            PASWAP(p, q);
            p++;
            q--;
        }
    }

    /* Check for empty splits */
    if (p == start_idx)
    {
        /* No points less than split.
           Split at lowest point instead ("sliding" the midpoint).
           Minimum 1 point will be in lower box.
        */
        uint32_t j = start_idx;
        split = PA(j, dim);
        for (i2 = start_idx + 1; i2 <= end_idx; i2++)
        {
            /* Find lowest point */
            cur_val = PA(i2, dim);
            if (cur_val < split)
            {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, start_idx);
        p = start_idx + 1;
    }
    else if (p == end_idx + 1)
    {
        /* No points greater than split.
           Split at highest point instead.
           Minimum 1 point will be in higher box.
        */
        uint32_t j = end_idx;
        split = PA(j, dim);
        for (i2 = start_idx; i2 < end_idx; i2++)
        {
            /* Find highest point */
            cur_val = PA(i2, dim);
            if (cur_val > split)
            {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, end_idx);
        p = end_idx;
    }

    /* Set return values */
    *cut_dim = dim;
    *cut_val = split;
    *n_lo = p - start_idx;
    return 0;
}

/************************************************
Construct a sub tree over a range of data points.
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims: number of dimensions
    start_idx : index of first data point to use
    n : number of data points
    bsp : number of points per leaf
    bbox : bounding box of set of data points
          (temporarily modified in place for the recursive calls,
           restored before returning)
************************************************/
Node_float* construct_subtree_float(float *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, float *bbox)
{
    /* Create new node */
    int is_leaf = (n <= bsp);
    Node_float *root = create_node_float(start_idx, n, is_leaf);
    int rval;
    int8_t cut_dim;
    uint32_t n_lo;
    float cut_val, lv, hv;
    if (is_leaf)
    {
        /* Make leaf node (cut_dim == -1 marks a leaf throughout this file) */
        root->cut_dim = -1;
    }
    else
    {
        /* Make split node */
        /* Partition data set and set node info */
        rval = partition_float(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo);
        if (rval == 1)
        {
            /* Degenerate box: fall back to a leaf */
            root->cut_dim = -1;
            return root;
        }
        root->cut_val = cut_val;
        root->cut_dim = cut_dim;

        /* Recurse on both subsets */
        lv = bbox[2 * cut_dim];
        hv = bbox[2 * cut_dim + 1];

        /* Set bounds for cut dimension */
        root->cut_bounds_lv = lv;
        root->cut_bounds_hv = hv;

        /* Update bounding box before call to lower subset and restore after */
        bbox[2 * cut_dim + 1] = cut_val;
        root->left_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox);
        bbox[2 * cut_dim + 1] = hv;

        /* Update bounding box before call to higher subset and restore after */
        bbox[2 * cut_dim] = cut_val;
        root->right_child = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox);
        bbox[2 * cut_dim] = lv;
    }
    return root;
}

/************************************************
Construct a tree over data points.
Params:
    pa : data points
    no_dims: number of dimensions
    n : number of data points
    bsp : number of points per leaf
Ownership: caller frees via delete_tree_float.
************************************************/
Tree_float* construct_tree_float(float *pa, int8_t no_dims, uint32_t n, uint32_t bsp)
{
    Tree_float *tree = (Tree_float *)malloc(sizeof(Tree_float));
    uint32_t i;
    uint32_t *pidx;
    float *bbox;

    tree->no_dims = no_dims;

    /* Initialize permutation array (identity; partitioning permutes it) */
    pidx = (uint32_t *)malloc(sizeof(uint32_t) * n);
    for (i = 0; i < n; i++)
    {
        pidx[i] = i;
    }

    bbox = (float *)malloc(2 * sizeof(float) * no_dims);
    get_bounding_box_float(pa, pidx, no_dims, n, bbox);
    tree->bbox = bbox;

    /* Construct subtree on full dataset */
    tree->root = (struct Node_float *)construct_subtree_float(pa, pidx, no_dims, 0, n, bsp, bbox);

    tree->pidx = pidx;
    return tree;
}

/************************************************
Create a tree node.
Params:
    start_idx : index of first data point to use
    n : number of data points
************************************************/
Node_float* create_node_float(uint32_t start_idx, uint32_t n, int is_leaf)
{
    Node_float *new_node;
    if (is_leaf)
    {
        /*
            Allocate only the part of the struct that will be used in a leaf
            node.  This relies on the C99 specification of struct layout
            conservation and padding and that dereferencing is never attempted
            for the node pointers in a leaf.
        */
        new_node = (Node_float *)malloc(sizeof(Node_float) - 2 * sizeof(Node_float *));
    }
    else
    {
        new_node = (Node_float *)malloc(sizeof(Node_float));
    }
    new_node->n = n;
    new_node->start_idx = start_idx;
    return new_node;
}

/************************************************
Delete subtree
Params:
    root : root node of subtree to delete
************************************************/
void delete_subtree_float(Node_float *root)
{
    if (root->cut_dim != -1)
    {
        delete_subtree_float((Node_float *)root->left_child);
        delete_subtree_float((Node_float *)root->right_child);
    }
    free(root);
}

/************************************************
Delete tree
Params:
    tree : Tree struct of kd tree
************************************************/
void delete_tree_float(Tree_float *tree)
{
    delete_subtree_float((Node_float *)tree->root);
    free(tree->bbox);
    free(tree->pidx);
    free(tree);
}

/************************************************
Print (debug helper; indents two spaces per level)
************************************************/
void print_tree_float(Node_float *root, int level)
{
    int i;
    for (i = 0; i < level; i++)
    {
        printf(" ");
    }
    printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim);
    if (root->cut_dim != -1)
        print_tree_float((Node_float *)root->left_child, level + 1);
    if (root->cut_dim != -1)
        print_tree_float((Node_float *)root->right_child, level + 1);
}

/************************************************
Calculate squared cartesian distance between points
Params:
    point1_coord : point 1
    point2_coord : point 2
************************************************/
float calc_dist_float(float *point1_coord, float *point2_coord, int8_t no_dims)
{
    /* Calculate squared distance */
    float dist = 0, dim_dist;
    int8_t i;
    for (i = 0; i < no_dims; i++)
    {
        dim_dist = point2_coord[i] - point1_coord[i];
        dist += dim_dist * dim_dist;
    }
    return dist;
}

/************************************************
Get squared distance from point to cube in specified dimension
Params:
    dim : dimension
    point_coord : cartesian coordinates of point
    bbox : cube
************************************************/ float get_cube_offset_float(int8_t dim, float *point_coord, float *bbox) { float dim_coord = point_coord[dim]; if (dim_coord < bbox[2 * dim]) { /* Left of cube in dimension */ return dim_coord - bbox[2 * dim]; } else if (dim_coord > bbox[2 * dim + 1]) { /* Right of cube in dimension */ return dim_coord - bbox[2 * dim + 1]; } else { /* Inside cube in dimension */ return 0.; } } /************************************************ Get minimum squared distance between point and cube. Params: point_coord : cartesian coordinates of point no_dims : number of dimensions bbox : cube ************************************************/ float get_min_dist_float(float *point_coord, int8_t no_dims, float *bbox) { float cube_offset = 0, cube_offset_dim; int8_t i; for (i = 0; i < no_dims; i++) { cube_offset_dim = get_cube_offset_float(i, point_coord, bbox); cube_offset += cube_offset_dim * cube_offset_dim; } return cube_offset; } /************************************************ Search a leaf node for closest point Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_float(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint32_t *restrict closest_idx, float *restrict closest_dist) { float cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Get distance to query point */ cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far*/ if (cur_dist < closest_dist[k - 1]) { insert_point_float(closest_idx, closest_dist, pidx[start_idx + i], 
cur_dist, k); } } } /************************************************ Search a leaf node for closest point with data point mask Params: pa : data points pidx : permutation index of data points no_dims : number of dimensions start_idx : index of first data point to use size : number of data points point_coord : query point mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_leaf_float_mask(float *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, float *restrict point_coord, uint32_t k, uint8_t *mask, uint32_t *restrict closest_idx, float *restrict closest_dist) { float cur_dist; uint32_t i; /* Loop through all points in leaf */ for (i = 0; i < n; i++) { /* Is this point masked out? */ if (mask[pidx[start_idx + i]]) { continue; } /* Get distance to query point */ cur_dist = calc_dist_float(&PA(start_idx + i, 0), point_coord, no_dims); /* Update closest info if new point is closest so far*/ if (cur_dist < closest_dist[k - 1]) { insert_point_float(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k); } } } /************************************************ Search subtree for nearest to query point Params: root : root node of subtree pa : data points pidx : permutation index of data points no_dims : number of dimensions point_coord : query point min_dist : minumum distance to nearest neighbour mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_splitnode_float(Node_float *root, float *pa, uint32_t *pidx, int8_t no_dims, float *point_coord, float min_dist, uint32_t k, float distance_upper_bound, float eps_fac, uint8_t *mask, uint32_t *closest_idx, float 
*closest_dist) { int8_t dim; float dist_left, dist_right; float new_offset; float box_diff; /* Skip if distance bound exeeded */ if (min_dist > distance_upper_bound) { return; } dim = root->cut_dim; /* Handle leaf node */ if (dim == -1) { if (mask) { search_leaf_float_mask(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, mask, closest_idx, closest_dist); } else { search_leaf_float(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, closest_idx, closest_dist); } return; } /* Get distance to cutting plane */ new_offset = point_coord[dim] - root->cut_val; if (new_offset < 0) { /* Left of cutting plane */ dist_left = min_dist; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_float((Node_float *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Right of cutting plane. Update minimum distance. See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = root->cut_bounds_lv - point_coord[dim]; if (box_diff < 0) { box_diff = 0; } dist_right = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_float((Node_float *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } else { /* Right of cutting plane */ dist_right = min_dist; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_float((Node_float *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Left of cutting plane. Update minimum distance. See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. 
*/ box_diff = point_coord[dim] - root->cut_bounds_hv; if (box_diff < 0) { box_diff = 0; } dist_left = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit*/ search_splitnode_float((Node_float *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } } /************************************************ Search for nearest neighbour for a set of query points Params: tree : Tree struct of kd tree pa : data points pidx : permutation index of data points point_coords : query points num_points : number of query points mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_tree_float(Tree_float *tree, float *pa, float *point_coords, uint32_t num_points, uint32_t k, float distance_upper_bound, float eps, uint8_t *mask, uint32_t *closest_idxs, float *closest_dists) { float min_dist; float eps_fac = 1 / ((1 + eps) * (1 + eps)); int8_t no_dims = tree->no_dims; float *bbox = tree->bbox; uint32_t *pidx = tree->pidx; uint32_t j = 0; #if defined(_MSC_VER) && defined(_OPENMP) int32_t i = 0; int32_t local_num_points = (int32_t) num_points; #else uint32_t i; uint32_t local_num_points = num_points; #endif Node_float *root = (Node_float *)tree->root; /* Queries are OpenMP enabled */ #pragma omp parallel { /* The low chunk size is important to avoid L2 cache trashing for spatial coherent query datasets */ #pragma omp for private(i, j) schedule(static, 100) nowait for (i = 0; i < local_num_points; i++) { for (j = 0; j < k; j++) { closest_idxs[i * k + j] = UINT32_MAX; closest_dists[i * k + j] = DBL_MAX; } min_dist = get_min_dist_float(point_coords + no_dims * i, no_dims, bbox); search_splitnode_float(root, pa, pidx, no_dims, 
point_coords + no_dims * i, min_dist, k, distance_upper_bound, eps_fac, mask,
                                   &closest_idxs[i * k], &closest_dists[i * k]);
        }
    }
/* NOTE(review): one closing brace appears to be missing here — the opens of
   search_tree_float (function body, omp parallel block, for loop) outnumber
   the closes shown; this looks like a flattening/paste artifact — verify
   against the original file. */

/* ----- double-precision mirror of the float implementation above ----- */

/************************************************
Insert point into priority queue
Params:
    closest_idx : index queue
    closest_dist : distance queue
    pidx : permutation index of data points
    cur_dist : distance to point inserted
    k : number of neighbours
************************************************/
void insert_point_double(uint32_t *closest_idx, double *closest_dist, uint32_t pidx, double cur_dist, uint32_t k)
{
    int i;
    for (i = k - 1; i > 0; i--)
    {
        if (closest_dist[i - 1] > cur_dist)
        {
            closest_dist[i] = closest_dist[i - 1];
            closest_idx[i] = closest_idx[i - 1];
        }
        else
        {
            break;
        }
    }
    closest_idx[i] = pidx;
    closest_dist[i] = cur_dist;
}

/************************************************
Get the bounding box of a set of points
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims: number of dimensions
    n : number of points
    bbox : bounding box (return)
************************************************/
void get_bounding_box_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t n, double *bbox)
{
    double cur;
    int8_t i, j;
    uint32_t bbox_idx, i2;

    /* Use first data point to initialize */
    for (i = 0; i < no_dims; i++)
    {
        bbox[2 * i] = bbox[2 * i + 1] = PA(0, i);
    }

    /* Update using rest of data points */
    for (i2 = 1; i2 < n; i2++)
    {
        for (j = 0; j < no_dims; j++)
        {
            bbox_idx = 2 * j;
            cur = PA(i2, j);
            if (cur < bbox[bbox_idx])
            {
                bbox[bbox_idx] = cur;
            }
            else if (cur > bbox[bbox_idx + 1])
            {
                bbox[bbox_idx + 1] = cur;
            }
        }
    }
}

/************************************************
Partition a range of data points by manipulation
the permutation index. The sliding midpoint rule is
used for the partitioning.
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims: number of dimensions
    start_idx : index of first data point to use
    n : number of data points
    bbox : bounding box of data points
    cut_dim : dimension used for partition (return)
    cut_val : value of cutting point (return)
    n_lo : number of point below cutting plane (return)
************************************************/
int partition_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *bbox, int8_t *cut_dim, double *cut_val, uint32_t *n_lo)
{
    int8_t dim = 0, i;
    uint32_t p, q, i2;
    double size = 0, min_val, max_val, split, side_len, cur_val;
    uint32_t end_idx = start_idx + n - 1;

    /* Find largest bounding box side */
    for (i = 0; i < no_dims; i++)
    {
        side_len = bbox[2 * i + 1] - bbox[2 * i];
        if (side_len > size)
        {
            dim = i;
            size = side_len;
        }
    }

    min_val = bbox[2 * dim];
    max_val = bbox[2 * dim + 1];

    /* Check for zero length or inconsistent */
    if (min_val >= max_val)
        return 1;

    /* Use middle for splitting */
    split = (min_val + max_val) / 2;

    /* Partition all data points around middle */
    p = start_idx;
    q = end_idx;
    while (p <= q)
    {
        if (PA(p, dim) < split)
        {
            p++;
        }
        else if (PA(q, dim) >= split)
        {
            /* Guard for underflow */
            if (q > 0)
            {
                q--;
            }
            else
            {
                break;
            }
        }
        else
        {
            PASWAP(p, q);
            p++;
            q--;
        }
    }

    /* Check for empty splits */
    if (p == start_idx)
    {
        /* No points less than split.
           Split at lowest point instead.
           Minimum 1 point will be in lower box.
        */
        uint32_t j = start_idx;
        split = PA(j, dim);
        for (i2 = start_idx + 1; i2 <= end_idx; i2++)
        {
            /* Find lowest point */
            cur_val = PA(i2, dim);
            if (cur_val < split)
            {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, start_idx);
        p = start_idx + 1;
    }
    else if (p == end_idx + 1)
    {
        /* No points greater than split.
           Split at highest point instead.
           Minimum 1 point will be in higher box.
        */
        uint32_t j = end_idx;
        split = PA(j, dim);
        for (i2 = start_idx; i2 < end_idx; i2++)
        {
            /* Find highest point */
            cur_val = PA(i2, dim);
            if (cur_val > split)
            {
                j = i2;
                split = cur_val;
            }
        }
        PASWAP(j, end_idx);
        p = end_idx;
    }

    /* Set return values */
    *cut_dim = dim;
    *cut_val = split;
    *n_lo = p - start_idx;
    return 0;
}

/************************************************
Construct a sub tree over a range of data points.
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims: number of dimensions
    start_idx : index of first data point to use
    n : number of data points
    bsp : number of points per leaf
    bbox : bounding box of set of data points
************************************************/
Node_double* construct_subtree_double(double *pa, uint32_t *pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, uint32_t bsp, double *bbox)
{
    /* Create new node */
    int is_leaf = (n <= bsp);
    Node_double *root = create_node_double(start_idx, n, is_leaf);
    int rval;
    int8_t cut_dim;
    uint32_t n_lo;
    double cut_val, lv, hv;
    if (is_leaf)
    {
        /* Make leaf node */
        root->cut_dim = -1;
    }
    else
    {
        /* Make split node */
        /* Partition data set and set node info */
        rval = partition_double(pa, pidx, no_dims, start_idx, n, bbox, &cut_dim, &cut_val, &n_lo);
        if (rval == 1)
        {
            root->cut_dim = -1;
            return root;
        }
        root->cut_val = cut_val;
        root->cut_dim = cut_dim;

        /* Recurse on both subsets */
        lv = bbox[2 * cut_dim];
        hv = bbox[2 * cut_dim + 1];

        /* Set bounds for cut dimension */
        root->cut_bounds_lv = lv;
        root->cut_bounds_hv = hv;

        /* Update bounding box before call to lower subset and restore after */
        bbox[2 * cut_dim + 1] = cut_val;
        root->left_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx, n_lo, bsp, bbox);
        bbox[2 * cut_dim + 1] = hv;

        /* Update bounding box before call to higher subset and restore after */
        bbox[2 * cut_dim] = cut_val;
        root->right_child = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, start_idx + n_lo, n - n_lo, bsp, bbox);
        bbox[2 * cut_dim] = lv;
    }
    return root;
}

/************************************************
Construct a tree over data points.
Params:
    pa : data points
    no_dims: number of dimensions
    n : number of data points
    bsp : number of points per leaf
************************************************/
Tree_double* construct_tree_double(double *pa, int8_t no_dims, uint32_t n, uint32_t bsp)
{
    Tree_double *tree = (Tree_double *)malloc(sizeof(Tree_double));
    uint32_t i;
    uint32_t *pidx;
    double *bbox;

    tree->no_dims = no_dims;

    /* Initialize permutation array */
    pidx = (uint32_t *)malloc(sizeof(uint32_t) * n);
    for (i = 0; i < n; i++)
    {
        pidx[i] = i;
    }

    bbox = (double *)malloc(2 * sizeof(double) * no_dims);
    get_bounding_box_double(pa, pidx, no_dims, n, bbox);
    tree->bbox = bbox;

    /* Construct subtree on full dataset */
    tree->root = (struct Node_double *)construct_subtree_double(pa, pidx, no_dims, 0, n, bsp, bbox);

    tree->pidx = pidx;
    return tree;
}

/************************************************
Create a tree node.
Params:
    start_idx : index of first data point to use
    n : number of data points
************************************************/
Node_double* create_node_double(uint32_t start_idx, uint32_t n, int is_leaf)
{
    Node_double *new_node;
    if (is_leaf)
    {
        /*
            Allocate only the part of the struct that will be used in a leaf
            node.  This relies on the C99 specification of struct layout
            conservation and padding and that dereferencing is never attempted
            for the node pointers in a leaf.
        */
        new_node = (Node_double *)malloc(sizeof(Node_double) - 2 * sizeof(Node_double *));
    }
    else
    {
        new_node = (Node_double *)malloc(sizeof(Node_double));
    }
    new_node->n = n;
    new_node->start_idx = start_idx;
    return new_node;
}

/************************************************
Delete subtree
Params:
    root : root node of subtree to delete
************************************************/
void delete_subtree_double(Node_double *root)
{
    if (root->cut_dim != -1)
    {
        delete_subtree_double((Node_double *)root->left_child);
        delete_subtree_double((Node_double *)root->right_child);
    }
    free(root);
}

/************************************************
Delete tree
Params:
    tree : Tree struct of kd tree
************************************************/
void delete_tree_double(Tree_double *tree)
{
    delete_subtree_double((Node_double *)tree->root);
    free(tree->bbox);
    free(tree->pidx);
    free(tree);
}

/************************************************
Print
************************************************/
void print_tree_double(Node_double *root, int level)
{
    int i;
    for (i = 0; i < level; i++)
    {
        printf(" ");
    }
    printf("(cut_val: %f, cut_dim: %i)\n", root->cut_val, root->cut_dim);
    if (root->cut_dim != -1)
        print_tree_double((Node_double *)root->left_child, level + 1);
    if (root->cut_dim != -1)
        print_tree_double((Node_double *)root->right_child, level + 1);
}

/************************************************
Calculate squared cartesian distance between points
Params:
    point1_coord : point 1
    point2_coord : point 2
************************************************/
double calc_dist_double(double *point1_coord, double *point2_coord, int8_t no_dims)
{
    /* Calculate squared distance */
    double dist = 0, dim_dist;
    int8_t i;
    for (i = 0; i < no_dims; i++)
    {
        dim_dist = point2_coord[i] - point1_coord[i];
        dist += dim_dist * dim_dist;
    }
    return dist;
}

/************************************************
Get squared distance from point to cube in specified dimension
Params:
    dim : dimension
    point_coord : cartesian coordinates of point
    bbox : cube
************************************************/
double get_cube_offset_double(int8_t dim, double *point_coord, double *bbox)
{
    double dim_coord = point_coord[dim];

    if (dim_coord < bbox[2 * dim])
    {
        /* Left of cube in dimension */
        return dim_coord - bbox[2 * dim];
    }
    else if (dim_coord > bbox[2 * dim + 1])
    {
        /* Right of cube in dimension */
        return dim_coord - bbox[2 * dim + 1];
    }
    else
    {
        /* Inside cube in dimension */
        return 0.;
    }
}

/************************************************
Get minimum squared distance between point and cube.
Params:
    point_coord : cartesian coordinates of point
    no_dims : number of dimensions
    bbox : cube
************************************************/
double get_min_dist_double(double *point_coord, int8_t no_dims, double *bbox)
{
    double cube_offset = 0, cube_offset_dim;
    int8_t i;

    for (i = 0; i < no_dims; i++)
    {
        cube_offset_dim = get_cube_offset_double(i, point_coord, bbox);
        cube_offset += cube_offset_dim * cube_offset_dim;
    }
    return cube_offset;
}

/************************************************
Search a leaf node for closest point
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims : number of dimensions
    start_idx : index of first data point to use
    n : number of data points
    point_coord : query point
    closest_idx : index of closest data point found (return)
    closest_dist : distance to closest point (return)
************************************************/
void search_leaf_double(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord,
                        uint32_t k, uint32_t *restrict closest_idx, double *restrict closest_dist)
{
    double cur_dist;
    uint32_t i;
    /* Loop through all points in leaf */
    for (i = 0; i < n; i++)
    {
        /* Get distance to query point */
        cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims);
        /* Update closest info if new point is closest so far*/
        if (cur_dist < closest_dist[k - 1])
        {
            insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k);
        }
    }
}

/************************************************
Search a leaf node for closest point with data point mask
Params:
    pa : data points
    pidx : permutation index of data points
    no_dims : number of dimensions
    start_idx : index of first data point to use
    n : number of data points
    point_coord : query point
    mask : boolean array of invalid (True) and valid (False) data points
    closest_idx : index of closest data point found (return)
    closest_dist : distance to closest point (return)
************************************************/
void search_leaf_double_mask(double *restrict pa, uint32_t *restrict pidx, int8_t no_dims, uint32_t start_idx, uint32_t n, double *restrict point_coord,
                             uint32_t k, uint8_t *mask,
                             uint32_t *restrict closest_idx, double *restrict closest_dist)
{
    double cur_dist;
    uint32_t i;
    /* Loop through all points in leaf */
    for (i = 0; i < n; i++)
    {
        /* Is this point masked out? */
        if (mask[pidx[start_idx + i]])
        {
            continue;
        }
        /* Get distance to query point */
        cur_dist = calc_dist_double(&PA(start_idx + i, 0), point_coord, no_dims);
        /* Update closest info if new point is closest so far*/
        if (cur_dist < closest_dist[k - 1])
        {
            insert_point_double(closest_idx, closest_dist, pidx[start_idx + i], cur_dist, k);
        }
    }
}

/************************************************
Search subtree for nearest to query point
Params:
    root : root node of subtree
    pa : data points
    pidx : permutation index of data points
    no_dims : number of dimensions
    point_coord : query point
    min_dist : minumum distance to nearest neighbour
    mask : boolean array of invalid (True) and valid (False) data points
    closest_idx : index of closest data point found (return)
    closest_dist : distance to closest point (return)
************************************************/
void search_splitnode_double(Node_double *root, double *pa, uint32_t *pidx, int8_t no_dims, double *point_coord,
                             double min_dist, uint32_t k, double
distance_upper_bound, double eps_fac, uint8_t *mask, uint32_t *closest_idx, double *closest_dist) { int8_t dim; double dist_left, dist_right; double new_offset; double box_diff; /* Skip if distance bound exeeded */ if (min_dist > distance_upper_bound) { return; } dim = root->cut_dim; /* Handle leaf node */ if (dim == -1) { if (mask) { search_leaf_double_mask(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, mask, closest_idx, closest_dist); } else { search_leaf_double(pa, pidx, no_dims, root->start_idx, root->n, point_coord, k, closest_idx, closest_dist); } return; } /* Get distance to cutting plane */ new_offset = point_coord[dim] - root->cut_val; if (new_offset < 0) { /* Left of cutting plane */ dist_left = min_dist; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit */ search_splitnode_double((Node_double *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Right of cutting plane. Update minimum distance. See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = root->cut_bounds_lv - point_coord[dim]; if (box_diff < 0) { box_diff = 0; } dist_right = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_double((Node_double *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } else { /* Right of cutting plane */ dist_right = min_dist; if (dist_right < closest_dist[k - 1] * eps_fac) { /* Search right subtree if minimum distance is below limit*/ search_splitnode_double((Node_double *)root->right_child, pa, pidx, no_dims, point_coord, dist_right, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } /* Left of cutting plane. Update minimum distance. 
See Algorithms for Fast Vector Quantization Sunil Arya and David M. Mount. */ box_diff = point_coord[dim] - root->cut_bounds_hv; if (box_diff < 0) { box_diff = 0; } dist_left = min_dist - box_diff * box_diff + new_offset * new_offset; if (dist_left < closest_dist[k - 1] * eps_fac) { /* Search left subtree if minimum distance is below limit*/ search_splitnode_double((Node_double *)root->left_child, pa, pidx, no_dims, point_coord, dist_left, k, distance_upper_bound, eps_fac, mask, closest_idx, closest_dist); } } } /************************************************ Search for nearest neighbour for a set of query points Params: tree : Tree struct of kd tree pa : data points pidx : permutation index of data points point_coords : query points num_points : number of query points mask : boolean array of invalid (True) and valid (False) data points closest_idx : index of closest data point found (return) closest_dist : distance to closest point (return) ************************************************/ void search_tree_double(Tree_double *tree, double *pa, double *point_coords, uint32_t num_points, uint32_t k, double distance_upper_bound, double eps, uint8_t *mask, uint32_t *closest_idxs, double *closest_dists) { double min_dist; double eps_fac = 1 / ((1 + eps) * (1 + eps)); int8_t no_dims = tree->no_dims; double *bbox = tree->bbox; uint32_t *pidx = tree->pidx; uint32_t j = 0; #if defined(_MSC_VER) && defined(_OPENMP) int32_t i = 0; int32_t local_num_points = (int32_t) num_points; #else uint32_t i; uint32_t local_num_points = num_points; #endif Node_double *root = (Node_double *)tree->root; /* Queries are OpenMP enabled */ #pragma omp parallel { /* The low chunk size is important to avoid L2 cache trashing for spatial coherent query datasets */ #pragma omp for private(i, j) schedule(static, 100) nowait for (i = 0; i < local_num_points; i++) { for (j = 0; j < k; j++) { closest_idxs[i * k + j] = UINT32_MAX; closest_dists[i * k + j] = DBL_MAX; } min_dist = 
get_min_dist_double(point_coords + no_dims * i, no_dims, bbox); search_splitnode_double(root, pa, pidx, no_dims, point_coords + no_dims * i, min_dist, k, distance_upper_bound, eps_fac, mask, &closest_idxs[i * k], &closest_dists[i * k]); } } }
/* ==== transform.c ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
% */ MagickExport Image *AutoOrientImage(const Image *image, const OrientationType orientation,ExceptionInfo *exception) { Image *orient_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); orient_image=(Image *) NULL; switch(orientation) { case UndefinedOrientation: case TopLeftOrientation: default: { orient_image=CloneImage(image,0,0,MagickTrue,exception); break; } case TopRightOrientation: { orient_image=FlopImage(image,exception); break; } case BottomRightOrientation: { orient_image=RotateImage(image,180.0,exception); break; } case BottomLeftOrientation: { orient_image=FlipImage(image,exception); break; } case LeftTopOrientation: { orient_image=TransposeImage(image,exception); break; } case RightTopOrientation: { orient_image=RotateImage(image,90.0,exception); break; } case RightBottomOrientation: { orient_image=TransverseImage(image,exception); break; } case LeftBottomOrientation: { orient_image=RotateImage(image,270.0,exception); break; } } if (orient_image != (Image *) NULL) orient_image->orientation=TopLeftOrientation; return(orient_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info) % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(chop_info != (RectangleInfo *) NULL); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,chop_image,1,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,chop_image,1,1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C M Y K I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. % % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; register ssize_t j; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cmyk_images=NewImageList(); for (j=0; j < (ssize_t) GetImageListLength(images); j+=4) { register ssize_t i; assert(images != (Image *) NULL); cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception); for (i=0; i < 4; i++) { image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { Quantum pixel; 
pixel=QuantumRange-GetPixelIntensity(images,p); switch (i) { case 0: SetPixelCyan(cmyk_image,pixel,q); break; case 1: SetPixelMagenta(cmyk_image,pixel,q); break; case 2: SetPixelYellow(cmyk_image,pixel,q); break; case 3: SetPixelBlack(cmyk_image,pixel,q); break; default: break; } p+=GetPixelChannels(images); q+=GetPixelChannels(cmyk_image); } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } AppendImageToList(&cmyk_images,cmyk_image); } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha=(Quantum) TransparentAlpha; crop_image->alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 
0) && (page.width > geometry->width)) page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. */ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; offset.x=(ssize_t) (bounding_box.x+bounding_box.width); offset.y=(ssize_t) (bounding_box.y+bounding_box.height); if ((offset.x > (ssize_t) image->page.width) || (offset.y > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,crop_image,1,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) crop_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(image,p) == 0) { SetPixelBackgoundColor(crop_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel); if ((traits == UndefinedPixelTrait) || (crop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(crop_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); } if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CropImage) #endif proceed=SetImageProgress(image,CropImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToTiles() crops a single image, into a possible list of tiles. % This may include a single sub-region of the image. This basically applies % all the normal geometry flags for Crop. % % Image *CropImageToTiles(const Image *image, % const RectangleInfo *crop_geometry, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport Image *CropImageToTiles(const Image *image, const char *crop_geometry,ExceptionInfo *exception) { Image *next, *crop_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); crop_image=NewImageList(); next=NewImageList(); flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception); if ((flags & AreaValue) != 0) { PointInfo delta, offset; RectangleInfo crop; size_t height, width; /* Crop into NxM tiles (@ flag). */ width=image->columns; height=image->rows; if (geometry.width == 0) geometry.width=1; if (geometry.height == 0) geometry.height=1; if ((flags & AspectValue) == 0) { width-=(geometry.x < 0 ? -1 : 1)*geometry.x; height-=(geometry.y < 0 ? -1 : 1)*geometry.y; } else { width+=(geometry.x < 0 ? -1 : 1)*geometry.x; height+=(geometry.y < 0 ? 
-1 : 1)*geometry.y; } delta.x=(double) width/geometry.width; delta.y=(double) height/geometry.height; if (delta.x < 1.0) delta.x=1.0; if (delta.y < 1.0) delta.y=1.0; for (offset.y=0; offset.y < (double) height; ) { if ((flags & AspectValue) == 0) { crop.y=(ssize_t) MagickRound((double) (offset.y- (geometry.y > 0 ? 0 : geometry.y))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) MagickRound((double) (offset.y+ (geometry.y < 0 ? 0 : geometry.y))); } else { crop.y=(ssize_t) MagickRound((double) (offset.y- (geometry.y > 0 ? geometry.y : 0))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) MagickRound((double) (offset.y+(geometry.y < -1 ? geometry.y : 0))); } crop.height-=crop.y; crop.y+=image->page.y; for (offset.x=0; offset.x < (double) width; ) { if ((flags & AspectValue) == 0) { crop.x=(ssize_t) MagickRound((double) (offset.x- (geometry.x > 0 ? 0 : geometry.x))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) MagickRound((double) (offset.x+ (geometry.x < 0 ? 0 : geometry.x))); } else { crop.x=(ssize_t) MagickRound((double) (offset.x- (geometry.x > 0 ? geometry.x : 0))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) MagickRound((double) (offset.x+ (geometry.x < 0 ? geometry.x : 0))); } crop.width-=crop.x; crop.x+=image->page.x; next=CropImage(image,&crop,exception); if (next != (Image *) NULL) AppendImageToList(&crop_image,next); } } ClearMagickException(exception); return(crop_image); } if (((geometry.width == 0) && (geometry.height == 0)) || ((flags & XValue) != 0) || ((flags & YValue) != 0)) { /* Crop a single region at +X+Y. 
*/ crop_image=CropImage(image,&geometry,exception); if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0)) { crop_image->page.width=geometry.width; crop_image->page.height=geometry.height; crop_image->page.x-=geometry.x; crop_image->page.y-=geometry.y; } return(crop_image); } if ((image->columns > geometry.width) || (image->rows > geometry.height)) { RectangleInfo page; size_t height, width; ssize_t x, y; /* Crop into tiles of fixed size WxH. */ page=image->page; if (page.width == 0) page.width=image->columns; if (page.height == 0) page.height=image->rows; width=geometry.width; if (width == 0) width=page.width; height=geometry.height; if (height == 0) height=page.height; next=NewImageList(); for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height) { for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width) { geometry.width=width; geometry.height=height; geometry.x=x; geometry.y=y; next=CropImage(image,&geometry,exception); if (next == (Image *) NULL) break; AppendImageToList(&crop_image,next); } if (next == (Image *) NULL) break; } return(crop_image); } return(CloneImage(image,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x c e r p t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExcerptImage() returns a excerpt of the image as defined by the geometry. % % The format of the ExcerptImage method is: % % Image *ExcerptImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image: a canvas of the requested size; the region at
    (geometry->x,geometry->y) is then copied into it row by row.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* virtual pixels handle any out-of-bounds source coordinates */
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) == 0)
        {
          /* masked source pixel: write the background color instead */
          SetPixelBackgoundColor(excerpt_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(excerpt_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        /* copy only channels defined in both source and destination */
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  /*
    Allocate extent image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* nothing to extend when the geometry matches the image exactly */
  if ((image->columns == geometry->width) && (image->rows == geometry->height) &&
      (geometry->x == 0) && (geometry->y == 0))
    return(CloneImage(image,0,0,MagickTrue,exception));
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageBackgroundColor(extent_image,exception);
  /* the offsets are negated: a positive geometry offset moves the original
     image right/down inside the extended canvas */
  (void) CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is written to destination row (rows-y-1).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flip_image,1,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) == 0)
        {
          /* masked source pixel: write the background color instead */
          SetPixelBackgoundColor(flip_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(flip_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        /* copy only channels defined in both source and destination */
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* mirror the virtual canvas offset so the page geometry stays consistent */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: the destination pointer starts one past the end of the row
    and walks backwards while the source pointer walks forwards.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flop_image,1,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* position q one pixel past the end of the destination row */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;

      /* decrement first: q now addresses the mirror of the source pixel */
      q-=GetPixelChannels(flop_image);
      if (GetPixelReadMask(image,p) == 0)
        {
          /* masked source pixel: leave the cloned destination pixel as-is */
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        /* copy only channels defined in both source and destination */
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* mirror the virtual canvas offset so the page geometry stays consistent */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  CopyImageRegion() transfers a columns x rows rectangle of pixels from
  (sx,sy) in source to (dx,dy) in destination, honoring the source read
  mask and copying only channels defined in both images.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(source,p) == 0)
        {
          /* masked source pixel: write the background color instead */
          SetPixelBackgoundColor(destination,q);
          p+=GetPixelChannels(source);
          q+=GetPixelChannels(destination);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  offset.x=x_offset;
  offset.y=y_offset;
  /* normalize the offsets into [0,columns) x [0,rows) */
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the roll wraps the image, so it is assembled from the four
    quadrants produced by splitting at (offset.x,offset.y).
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* the shave must leave at least one pixel in each dimension */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /* shrink the virtual canvas to match the shaved frame */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
%
%  The format of the SpliceImage method is:
%
%      Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to splice with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; if (GetPixelReadMask(image,p) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(image,p) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); 
continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; if (GetPixelReadMask(image,q) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); 
q+=GetPixelChannels(splice_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(image,q) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif 
proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % This function destroys what it assumes to be a single image list. % If the input image is part of a larger list, all other images in that list % will be simply 'lost', not destroyed. % % Also if the crop generates a list of images only the first image is resized. % And finally if the crop succeeds and the resize failed, you will get a % cropped image, as well as a 'false' or 'failed' report. % % This function and should probably be deprecated in favor of direct calls % to CropImageToTiles() or ResizeImage(), as appropriate. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % % o exception: return any errors or warnings in this structure. 
%
*/
/*
  TransformImage(): crop *image per crop_geometry (when given), then scale the
  result to image_geometry (when given).  On success *image points at the new
  image and MagickTrue is returned; if the resize fails the (possibly cropped)
  image is kept and MagickFalse is returned.
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *current,
    *resized;

  RectangleInfo
    region;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  current=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *tiles;

      /*
        Crop to one or more tiles; only the first tile of the resulting list
        is kept.  When the crop fails, fall back to a plain clone.
      */
      tiles=CropImageToTiles(*image,crop_geometry,exception);
      if (tiles == (Image *) NULL)
        current=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          current=DestroyImage(current);
          current=GetFirstImageInList(tiles);
        }
      *image=current;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale the image to the user-specified size, unless it already matches.
  */
  (void) ParseRegionGeometry(current,image_geometry,&region,exception);
  if ((current->columns == region.width) && (current->rows == region.height))
    return(MagickTrue);
  resized=ResizeImage(current,region.width,region.height,current->filter,
    exception);
  if (resized == (Image *) NULL)
    return(MagickFalse);
  current=DestroyImage(current);
  current=resized;
  *image=current;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelReadMask(image,q) == 0) { SetPixelBackgoundColor(transpose_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(transpose_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait 
traits=GetPixelChannelTraits(image,channel); PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image, channel); if ((traits == UndefinedPixelTrait) || (transpose_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transpose_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(transpose_image); } if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,TransposeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s v e r s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransverseImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis while rotating them by 270 degrees. % % The format of the TransverseImage method is: % % Image *TransverseImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
/*
  Transverse the image: output pixel comes from rotating the input by 270
  degrees and mirroring vertically.  Returns a new image or NULL on failure.
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Clone with rows/columns swapped to hold the result. */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Source row y maps to destination column (rows-y-1), written bottom-up. */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start one past the end of the destination column; q is stepped
       backwards at the top of each x iteration. */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      /* Masked source pixels are skipped (destination keeps clone contents). */
      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      /* Copy every channel present in both source and destination. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* Adjust the page geometry for the rotation+mirror. */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Trim the image down to the bounding box of its non-border pixels.  When the
  bounding box is degenerate (the whole image is border), a 1x1 transparent
  placeholder image is returned instead.  Returns NULL on failure.
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /* Translate the bounding box into page coordinates and crop to it. */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate bounding box: produce a 1x1 fully transparent stand-in that
    keeps the original page geometry (offset at -1,-1).
  */
  {
    Image
      *placeholder;

    placeholder=CloneImage(image,1,1,MagickTrue,exception);
    if (placeholder == (Image *) NULL)
      return((Image *) NULL);
    placeholder->background_color.alpha=(Quantum) TransparentAlpha;
    placeholder->alpha_trait=BlendPixelTrait;
    (void) SetImageBackgroundColor(placeholder,exception);
    placeholder->page=image->page;
    placeholder->page.x=(-1);
    placeholder->page.y=(-1);
    return(placeholder);
  }
}
rar5_fmt_plug.c
/* RAR 5.0 cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia.
 *
 * http://www.rarlab.com/technote.htm
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the
 * following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * $rar5$<salt_len>$<salt>$<iter_log2>$<iv>$<pswcheck_len>$<pswcheck>
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rar5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rar5);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               1 // tuned on core i7
#endif
#endif

#include "arch.h"
#include "johnswap.h"
#include "stdint.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "rar5_common.h"
//#define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX
#include "pbkdf2_hmac_sha256.h"
#include "memdbg.h"

#define FORMAT_LABEL            "RAR5"
#define FORMAT_NAME             ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME          "PBKDF2-SHA256 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME          "PBKDF2-SHA256 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        32
#define SALT_SIZE               sizeof(struct custom_salt)
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_ALIGN              sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* Per-candidate plaintext buffers; sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];

/* Allocate per-thread key/output buffers; scales the crypt batch size by the
 * OpenMP thread count. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
			self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
			self->params.max_keys_per_crypt);
}

/* Release buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Install the salt selected by the cracking loop. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Derive the RAR5 password-check value for each candidate key.
 * Per the RAR5 format, the check value uses iterations+32 PBKDF2 rounds and
 * the 32-byte SHA-256 output is folded into SIZE_PSWCHECK bytes by XOR. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA256
		/* SIMD path: process SSE_GROUP_SZ_SHA256 candidates at once. */
		int lens[SSE_GROUP_SZ_SHA256], i, j;
		unsigned char PswCheck[SIZE_PSWCHECK],
		    PswCheckValue[SSE_GROUP_SZ_SHA256][SHA256_DIGEST_SIZE];
		unsigned char *pin[SSE_GROUP_SZ_SHA256];
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA256];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = (ARCH_WORD_32*)PswCheckValue[i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens,
				cur_salt->salt, SIZE_SALT50,
				cur_salt->iterations+32, &(x.poutc),
				SHA256_DIGEST_SIZE, 0);
		/* Fold each 32-byte digest into SIZE_PSWCHECK bytes by XOR. */
		for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
			memset(PswCheck, 0, sizeof(PswCheck));
			for (i = 0; i < SHA256_DIGEST_SIZE; i++)
				PswCheck[i % SIZE_PSWCHECK] ^=
					PswCheckValue[j][i];
			memcpy((void*)crypt_out[index+j], PswCheck,
					SIZE_PSWCHECK);
		}
#else
		/* Scalar path: one candidate per iteration. */
		unsigned char PswCheckValue[SHA256_DIGEST_SIZE];
		unsigned char PswCheck[SIZE_PSWCHECK];
		int i;
		pbkdf2_sha256((unsigned char*)saved_key[index],
				strlen(saved_key[index]),
				cur_salt->salt, SIZE_SALT50,
				cur_salt->iterations+32, PswCheckValue,
				SHA256_DIGEST_SIZE, 0);
		/* Fold the 32-byte digest into SIZE_PSWCHECK bytes by XOR. */
		memset(PswCheck, 0, sizeof(PswCheck));
		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			PswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[i];
		memcpy((void*)crypt_out[index], PswCheck, SIZE_PSWCHECK);
#endif
	}
	return count;
}

/* Store one candidate password, truncated to PLAINTEXT_LENGTH. */
static void rar5_set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

/* Return the candidate password previously stored at index. */
static char *get_key(int index)
{
	return saved_key[index];
}

/* John the Ripper format descriptor; methods above plus defaults and helpers
 * from rar5_common.h (valid, get_binary, get_salt, cmp_*, get_hash_*). */
struct fmt_main fmt_rar5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		rar5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
mpy_common.h
#ifndef _MPY_EXTERNAL_COMMON_H
#define _MPY_EXTERNAL_COMMON_H

#include <omp.h>
#include <offload.h>
#include <numpy/npy_os.h>

/* Some useful macros */

/* Attribute marking a symbol for compilation on the MIC (Xeon Phi) target;
 * spelling differs between MSVC and GCC-style compilers. */
#ifdef NPY_OS_WIN32
#define MPY_TARGET_MIC __declspec(target(mic))
#else
#define MPY_TARGET_MIC __attribute__((target(mic)))
#endif

/* Memset on target */
/* Run memset on the given offload device.  NOTE(review): `ptr` is mapped by
 * value, so this presumably relies on the pointer already referring to
 * device-accessible memory (e.g. allocated via target_alloc) -- confirm. */
static NPY_INLINE void *
target_memset(void *ptr, int value, size_t num, int device_num)
{
#pragma omp target device(device_num) map(to: ptr, value, num)
    memset(ptr, value, num);

    return ptr;
}

/* Thin aliases over the OpenMP device-memory API. */
#define target_alloc omp_target_alloc
#define target_malloc omp_target_alloc
#define target_free omp_target_free
#define target_memcpy(dst, src, len, dst_dev, src_dev) \
    omp_target_memcpy(dst, src, len, 0, 0, dst_dev, src_dev)

/* Release the GIL around large iterator loops, but only when no iteration
 * callback needs the Python C-API. */
#ifdef NPY_ALLOW_THREADS
#define MPY_BEGIN_THREADS_NDITER(iter) \
        do { \
            if (!MpyIter_IterationNeedsAPI(iter)) { \
                NPY_BEGIN_THREADS_THRESHOLDED(MpyIter_GetIterSize(iter)); \
            } \
        } while(0)
#else
#define MPY_BEGIN_THREADS_NDITER(iter)
#endif

/* Backward compatibility for numpy 1.12 */
#ifndef NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000
#endif

/* End array iter */

#endif
LearnerEvaluator.h
/**
 * evaluation: LearnerEvaluator.h
 * Copyright (c) Torr Vision Group, University of Oxford, 2015. All rights reserved.
 */

#ifndef H_EVALUATION_LEARNEREVALUATOR
#define H_EVALUATION_LEARNEREVALUATOR

#include "../splitgenerators/SplitGenerator.h"

namespace evaluation {

/**
 * \brief An instance of a class deriving from an instantiation of this class template can be used to
 *        evaluate a learner (e.g. a random forest) using approaches based on example set splitting.
 */
template <typename Example, typename Result>
class LearnerEvaluator
{
  //#################### TYPEDEFS ####################
protected:
  /** Shared pointer to an immutable example. */
  typedef boost::shared_ptr<const Example> Example_CPtr;

  /** The per-split / aggregate result type produced by the evaluation. */
  typedef Result ResultType;

  //#################### PRIVATE VARIABLES ####################
private:
  /** The generator to use to split the example set. */
  SplitGenerator_Ptr m_splitGenerator;

  //#################### CONSTRUCTORS ####################
protected:
  /**
   * \brief Constructs a learner evaluator.
   *
   * \param splitGenerator  The generator to use to split the example set.
   */
  explicit LearnerEvaluator(const SplitGenerator_Ptr& splitGenerator)
  : m_splitGenerator(splitGenerator)
  {}

  //#################### DESTRUCTOR ####################
public:
  /**
   * \brief Destroys the learner evaluator.
   */
  virtual ~LearnerEvaluator() {}

  //#################### PROTECTED ABSTRACT MEMBER FUNCTIONS ####################
protected:
  /**
   * \brief Averages the individual results from the various splits.
   *
   * Note: when built with OpenMP, the results are accumulated in a
   * non-deterministic order, so implementations should be order-insensitive.
   *
   * \param results The results from the various splits.
   * \return        The average of the results.
   */
  virtual Result average_results(const std::vector<Result>& results) const = 0;

  /**
   * \brief Evaluates the learner on the specified split of examples.
   *
   * \param examples  The examples on which to evaluate the learner.
   * \param split     The way in which the examples should be split into training and validation sets.
   * \return          The results of evaluating the learner on the specified split.
   */
  virtual Result evaluate_on_split(const std::vector<Example_CPtr>& examples, const SplitGenerator::Split& split) const = 0;

  //#################### PUBLIC MEMBER FUNCTIONS ####################
public:
  /**
   * \brief Evaluates the learner on the specified set of examples.
   *
   * Each generated split is evaluated (in parallel when OpenMP is enabled)
   * and the per-split results are combined via average_results().
   *
   * \param examples  The examples on which to evaluate the learner.
   * \return          The results of the evaluation process.
   */
  Result evaluate(const std::vector<Example_CPtr>& examples) const
  {
    std::vector<Result> results;
    std::vector<SplitGenerator::Split> splits = m_splitGenerator->generate_splits(examples.size());
    int size = static_cast<int>(splits.size());

#ifdef WITH_OPENMP
    #pragma omp parallel for
#endif
    for(int i = 0; i < size; ++i)
    {
      Result evaluationResult = evaluate_on_split(examples, splits[i]);

      // Serialise the push_back; results therefore arrive in arbitrary order.
#ifdef WITH_OPENMP
      #pragma omp critical
#endif
      results.push_back(evaluationResult);
    }

    return average_results(results);
  }
};

}

#endif
data.c
#include "../mesh.h"
#include "../params.h"
#include "../shared.h"
#include "../umesh.h"
#include <math.h>
#include <stdlib.h>

// Allocates a zero-initialised double array and returns the byte count.
// Uses aligned allocation on Intel builds; the OpenMP first-touch pass places
// pages on the NUMA nodes of the threads that will use them.
size_t allocate_data(double** buf, size_t len) {
#ifdef INTEL
  *buf = (double*)_mm_malloc(sizeof(double) * len, VEC_ALIGN);
#else
  *buf = (double*)malloc(sizeof(double) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }

// Perform first-touch
#pragma omp parallel for
  for (size_t ii = 0; ii < len; ++ii) {
    (*buf)[ii] = 0.0;
  }

  return sizeof(double) * len;
}

// Allocates a zero-initialised single-precision array; returns the byte count.
size_t allocate_float_data(float** buf, size_t len) {
#ifdef INTEL
  *buf = (float*)_mm_malloc(sizeof(float) * len, VEC_ALIGN);
#else
  *buf = (float*)malloc(sizeof(float) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }

// Perform first-touch
#pragma omp parallel for
  for (size_t ii = 0; ii < len; ++ii) {
    (*buf)[ii] = 0.0;
  }

  // FIX: previously returned sizeof(double) * len, over-reporting the
  // allocated size of a float array by a factor of two.
  return sizeof(float) * len;
}

// Allocates a zero-initialised 32-bit integer array; returns the byte count.
size_t allocate_int_data(int** buf, size_t len) {
#ifdef INTEL
  *buf = (int*)_mm_malloc(sizeof(int) * len, VEC_ALIGN);
#else
  *buf = (int*)malloc(sizeof(int) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }

// Perform first-touch
#pragma omp parallel for
  for (size_t ii = 0; ii < len; ++ii) {
    (*buf)[ii] = 0;
  }

  return sizeof(int) * len;
}

// Allocates a zero-initialised 64-bit unsigned integer array; returns bytes.
size_t allocate_uint64_data(uint64_t** buf, const size_t len) {
#ifdef INTEL
  *buf = (uint64_t*)_mm_malloc(sizeof(uint64_t) * len, VEC_ALIGN);
#else
  *buf = (uint64_t*)malloc(sizeof(uint64_t) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }

// Perform first-touch
#pragma omp parallel for
  for (size_t ii = 0; ii < len; ++ii) {
    (*buf)[ii] = 0;
  }

  return sizeof(uint64_t) * len;
}

// Allocates a zero-initialised complex double array; returns the byte count.
size_t allocate_complex_double_data(_Complex double** buf, const size_t len) {
#ifdef INTEL
  *buf =
      (_Complex double*)_mm_malloc(sizeof(_Complex double) * len, VEC_ALIGN);
#else
  *buf = (_Complex double*)malloc(sizeof(_Complex double) * len);
#endif

  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }

// Perform first-touch
#pragma omp parallel for
  for (size_t ii = 0; ii < len; ++ii) {
    (*buf)[ii] = 0.0;
  }

  return sizeof(_Complex double) * len;
}

// Allocates a host copy of some buffer (host-only build: same as device).
void allocate_host_data(double** buf, size_t len) { allocate_data(buf, len); }

// Allocates a host copy of some buffer (host-only build: same as device).
void allocate_host_int_data(int** buf, size_t len) {
  allocate_int_data(buf, len);
}

// Deallocate a double array (matches the allocator used above).
void deallocate_data(double* buf) {
#ifdef INTEL
  _mm_free(buf);
#else
  free(buf);
#endif
}

// Deallocates a float array.
void deallocate_float_data(float* buf) {
#ifdef INTEL
  _mm_free(buf);
#else
  free(buf);
#endif
}

// Deallocation of host data.
void deallocate_host_data(double* buf) {
  // Not necessary as host-only
}

// Deallocates a 32-bit integer array.
void deallocate_int_data(int* buf) {
#ifdef INTEL
  _mm_free(buf);
#else
  free(buf);
#endif
}

// Deallocates a 64-bit integer array.
void deallocate_uint64_t_data(uint64_t* buf) {
#ifdef INTEL
  _mm_free(buf);
#else
  free(buf);
#endif
}

// Deallocates complex double data.
void deallocate_complex_double_data(_Complex double* buf) {
#ifdef INTEL
  _mm_free(buf);
#else
  free(buf);
#endif
}

// Deallocation of host integer data.
void deallocate_host_int_data(int* buf) {
  // Not necessary as host-only
}

// Just swaps the buffers on the host; len/send are unused in this backend.
void copy_buffer(const size_t len, double** src, double** dst, int send) {
  double* temp = *src;
  *src = *dst;
  *dst = temp;
}

// Just swaps the buffers on the host; len/send are unused in this backend.
void copy_int_buffer(const size_t len, int** src, int** dst, int send) {
  int* temp = *src;
  *src = *dst;
  *dst = temp;
}

// Move a host buffer onto the device (host-only build: a pointer swap).
void move_host_buffer_to_device(const size_t len, double** src, double** dst) {
  copy_buffer(len, src, dst, SEND);
}

// Initialises 2d mesh data: uniform rectilinear edges/cell widths, with edge
// positions corrected for the halo padding and the rank's x/y offsets.
void mesh_data_init_2d(const int local_nx, const int local_ny,
                       const int global_nx, const int global_ny, const int pad,
                       const int x_off, const int y_off, const double width,
                       const double height, double* edgex, double* edgey,
                       double* edgedx, double* edgedy, double* celldx,
                       double* celldy) {
// Simple uniform rectilinear initialisation
#pragma omp parallel for
  for (int ii = 0; ii < local_nx + 1; ++ii) {
    edgedx[ii] = width / (global_nx);

    // Note: correcting for padding
    edgex[ii] = edgedx[ii] * (x_off + ii - pad);
  }
#pragma omp parallel for
  for (int ii = 0; ii < local_nx; ++ii) {
    celldx[ii] = width / (global_nx);
  }
#pragma omp parallel for
  for (int ii = 0; ii < local_ny + 1; ++ii) {
    edgedy[ii] = height / (global_ny);

    // Note: correcting for padding
    edgey[ii] = edgedy[ii] * (y_off + ii - pad);
  }
#pragma omp parallel for
  for (int ii = 0; ii < local_ny; ++ii) {
    celldy[ii] = height / (global_ny);
  }
}

// Initialises 3d mesh data: the x/y planes via mesh_data_init_2d, then the
// z-direction edges and cell depths.
void mesh_data_init_3d(const int local_nx, const int local_ny,
                       const int local_nz, const int global_nx,
                       const int global_ny, const int global_nz, const int pad,
                       const int x_off, const int y_off, const int z_off,
                       const double width, const double height,
                       const double depth, double* edgex, double* edgey,
                       double* edgez, double* edgedx, double* edgedy,
                       double* edgedz, double* celldx, double* celldy,
                       double* celldz) {
  // Initialise as in the 2d case
  mesh_data_init_2d(local_nx, local_ny, global_nx, global_ny, pad, x_off,
                    y_off, width, height, edgex, edgey, edgedx, edgedy, celldx,
                    celldy);

// Simple uniform rectilinear initialisation
#pragma omp parallel for
  for (int ii = 0; ii < local_nz + 1; ++ii) {
    edgedz[ii] = depth / (global_nz);
    edgez[ii] = edgedz[ii] * (z_off + ii - pad);
  }
#pragma omp parallel for
  for (int ii = 0; ii < local_nz; ++ii) {
    celldz[ii] = depth / (global_nz);
  }
}

// Initialise 2d state data from the problem definition file: each
// "problem_<n>" entry lists key=value pairs followed by four normalised
// bounds (xpos, ypos, width, height) scaled by the mesh dimensions.
void set_problem_2d(const int local_nx, const int local_ny, const int pad,
                    const double mesh_width, const double mesh_height,
                    const double* edgex, const double* edgey, const int ndims,
                    const char* problem_def_filename, double* rho, double* e,
                    double* x) {
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * MAX_STR_LEN);
  double* values = (double*)malloc(sizeof(double) * MAX_KEYS);
  int nentries = 0;
  while (1) {
    char specifier[MAX_STR_LEN];
    sprintf(specifier, "problem_%d", nentries++);

    int nkeys = 0;
    if (!get_key_value_parameter(specifier, problem_def_filename, keys, values,
                                 &nkeys)) {
      break;
    }

    // The last four keys are the bound specification
    double xpos = values[nkeys - 4] * mesh_width;
    double ypos = values[nkeys - 3] * mesh_height;
    double width = values[nkeys - 2] * mesh_width;
    double height = values[nkeys - 1] * mesh_height;

    // Loop through the mesh and set the problem
    for (int ii = pad; ii < local_ny - pad; ++ii) {
      for (int jj = pad; jj < local_nx - pad; ++jj) {
        double global_xpos = edgex[jj];
        double global_ypos = edgey[ii];

        // Check we are in bounds of the problem entry
        if (global_xpos >= xpos && global_ypos >= ypos &&
            global_xpos < xpos + width && global_ypos < ypos + height) {
          // The upper bound excludes the bounding box for the entry
          for (int kk = 0; kk < nkeys - (2 * ndims); ++kk) {
            const char* key = &keys[kk * MAX_STR_LEN];
            if (strmatch(key, "density")) {
              rho[ii * local_nx + jj] = values[kk];
            } else if (strmatch(key, "energy")) {
              e[ii * local_nx + jj] = values[kk];
            } else if (strmatch(key, "temperature")) {
              x[ii * local_nx + jj] = values[kk];
            } else {
              TERMINATE("Found unrecognised key in %s : %s.\n",
                        problem_def_filename, key);
            }
          }
        }
      }
    }
  }

  free(keys);
  free(values);
}

// Initialise 3d state data from the problem definition file; as the 2d case
// but with six trailing bound values (xpos, ypos, zpos, width, height, depth).
void set_problem_3d(const int local_nx, const int local_ny, const int local_nz,
                    const int pad, const double mesh_width,
                    const double mesh_height, const double mesh_depth,
                    const double* edgex, const double* edgey,
                    const double* edgez, const int ndims,
                    const char* problem_def_filename, double* rho, double* e,
                    double* x) {
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * MAX_STR_LEN);
  double* values = (double*)malloc(sizeof(double) * MAX_KEYS);
  int nentries = 0;
  while (1) {
    char specifier[MAX_STR_LEN];
    sprintf(specifier, "problem_%d", nentries++);

    int nkeys = 0;
    if (!get_key_value_parameter(specifier, problem_def_filename, keys, values,
                                 &nkeys)) {
      break;
    }

    // The last six keys are the bound specification
    double xpos = values[nkeys - 6] * mesh_width;
    double ypos = values[nkeys - 5] * mesh_height;
    double zpos = values[nkeys - 4] * mesh_depth;
    double width = values[nkeys - 3] * mesh_width;
    double height = values[nkeys - 2] * mesh_height;
    double depth = values[nkeys - 1] * mesh_depth;

    // Loop through the mesh and set the problem
    for (int ii = pad; ii < local_nz - pad; ++ii) {
      for (int jj = pad; jj < local_ny - pad; ++jj) {
        for (int kk = pad; kk < local_nx - pad; ++kk) {
          double global_xpos = edgex[kk];
          double global_ypos = edgey[jj];
          double global_zpos = edgez[ii];

          // Check we are in bounds of the problem entry
          if (global_xpos >= xpos && global_ypos >= ypos &&
              global_zpos >= zpos && global_xpos < xpos + width &&
              global_ypos < ypos + height && global_zpos < zpos + depth) {
            // The upper bound excludes the bounding box for the entry
            for (int ee = 0; ee < nkeys - (2 * ndims); ++ee) {
              const int index =
                  (ii * local_nx * local_ny) + (jj * local_nx) + (kk);
              const char* key = &keys[ee * MAX_STR_LEN];
              if (strmatch(key, "density")) {
                rho[(index)] = values[ee];
              } else if (strmatch(key, "energy")) {
                e[(index)] = values[ee];
              } else if (strmatch(key, "temperature")) {
                x[(index)] = values[ee];
              } else {
                TERMINATE("Found unrecognised key in %s : %s.\n",
                          problem_def_filename, key);
              }
            }
          }
        }
      }
    }
  }

  free(keys);
  free(values);
}

// Finds the normals for all boundary cells by accumulating edge normals of
// the boundary faces adjacent to each boundary node.
void find_boundary_normals(UnstructuredMesh* umesh, int* boundary_face_list) {
// Loop through all of the boundary cells and find their normals
#pragma omp parallel for
  for (int nn = 0; nn < umesh->nnodes; ++nn) {
    const int boundary_index = umesh->boundary_index[(nn)];
    if (boundary_index == IS_INTERIOR) {
      continue;
    }

    double normal_x = 0.0;
    double normal_y = 0.0;

    for (int bb1 = 0; bb1 < umesh->nboundary_nodes; ++bb1) {
      const int node0 = boundary_face_list[bb1 * 2];
      const int node1 = boundary_face_list[bb1 * 2 + 1];

      if (node0 == nn || node1 == nn) {
        const double node0_x = umesh->nodes_x0[(node0)];
        const double node0_y = umesh->nodes_y0[(node0)];
        const double node1_x = umesh->nodes_x0[(node1)];
        const double node1_y = umesh->nodes_y0[(node1)];

        // Perpendicular of the edge vector (node0 -> node1).
        normal_x += node0_y - node1_y;
        normal_y += -(node0_x - node1_x);
      }
    }

    // We are fixed if we are one of the four corners
    // NOTE(review): this assumes a unit-square mesh ([0,1]x[0,1]) -- confirm.
    if ((umesh->nodes_x0[(nn)] == 0.0 || umesh->nodes_x0[(nn)] == 1.0) &&
        (umesh->nodes_y0[(nn)] == 0.0 || umesh->nodes_y0[(nn)] == 1.0)) {
      umesh->boundary_type[(boundary_index)] = IS_CORNER;
    } else {
      umesh->boundary_type[(boundary_index)] = IS_BOUNDARY;
    }

    // NOTE(review): if no adjacent boundary face is found, normal_mag is 0
    // and the division below yields NaN -- presumably unreachable for a
    // well-formed boundary_face_list; verify.
    const double normal_mag = sqrt(normal_x * normal_x + normal_y * normal_y);
    umesh->boundary_normal_x[(boundary_index)] = normal_x / normal_mag;
    umesh->boundary_normal_y[(boundary_index)] = normal_y / normal_mag;
  }
}

// Finds the normals for all boundary cells (3d) -- not yet implemented.
void find_boundary_normals_3d(UnstructuredMesh* umesh,
                              int* boundary_face_list) {
  TERMINATE("%s not yet implemented.", __func__);

#if 0
// Loop through all of the boundary cells and find their normals
#pragma omp parallel for
  for (int nn = 0; nn < umesh->nnodes; ++nn) {
    const int boundary_index = umesh->boundary_index[(nn)];
    if (boundary_index == IS_INTERIOR) {
      continue;
    }

    double normal_x = 0.0;
    double normal_y = 0.0;

    for (int bb1 = 0; bb1 < umesh->nboundary_faces; ++bb1) {
      const int node0 = boundary_face_list[bb1 * 2];
      const int node1 = boundary_face_list[bb1 * 2 + 1];

      if (node0 == nn || node1 == nn) {
        const double node0_x = umesh->nodes_x0[(node0)];
        const double node0_y = umesh->nodes_y0[(node0)];
        const double node1_x = umesh->nodes_x0[(node1)];
        const double node1_y = umesh->nodes_y0[(node1)];

        normal_x += node0_y - node1_y;
        normal_y += -(node0_x - node1_x);
      }
    }

    // We are fixed if we are one of the four corners
    if ((umesh->nodes_x0[(nn)] == 0.0 || umesh->nodes_x0[(nn)] == 1.0) &&
        (umesh->nodes_y0[(nn)] == 0.0 || umesh->nodes_y0[(nn)] == 1.0)) {
      umesh->boundary_type[(boundary_index)] = IS_CORNER;
    } else {
      umesh->boundary_type[(boundary_index)] = IS_BOUNDARY;
    }

    const double normal_mag = sqrt(normal_x * normal_x + normal_y * normal_y);
    umesh->boundary_normal_x[(boundary_index)] = normal_x / normal_mag;
    umesh->boundary_normal_y[(boundary_index)] = normal_y / normal_mag;
  }
#endif // if 0
}
QuadNodePolarEuclid.h
/*
 * QuadNodePolarEuclid.h
 *
 *  Created on: 21.05.2014
 *      Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
 *
 *  Note: This is similar enough to QuadNode.h that one could merge these two classes.
 */

#ifndef QUADNODEPOLAREUCLID_H_
#define QUADNODEPOLAREUCLID_H_

#include <vector>
#include <algorithm>
#include <functional>
#include <assert.h>
#include "../../auxiliary/Log.h"
#include "../../geometric/HyperbolicSpace.h"

using std::vector;
using std::min;
using std::max;
using std::cos;

namespace NetworKit {

/**
 * One node of a polar quadtree over a disk in the Euclidean plane.
 * Each node manages the half-open annulus sector
 * [leftAngle, rightAngle) x [minR, maxR); leaves store points, inner nodes
 * store exactly four children covering a 2x2 angular/radial subdivision.
 */
template <class T>
class QuadNodePolarEuclid {
	friend class QuadTreeGTest;
private:
	// Region managed by this node: angles in [leftAngle, rightAngle),
	// radii in [minR, maxR). Half-open, so sibling regions never overlap.
	double leftAngle;
	double minR;
	double rightAngle;
	double maxR;
	// Cartesian corner points of the region: a/b on the inner arc,
	// c/d on the outer arc (used for conservative distance bounds).
	Point2D<double> a,b,c,d;
	unsigned capacity;    // number of points a leaf may hold before splitting
	static const unsigned coarsenLimit = 4;    // merge children back below this subtree size
	count subTreeSize;    // points in all descendants (maintained for inner nodes only)
	std::vector<T> content;    // leaf payload, parallel to positions/angles/radii
	std::vector<Point2D<double> > positions;    // Cartesian coordinates of stored points
	std::vector<double> angles;    // polar angle of each stored point
	std::vector<double> radii;     // polar radius of each stored point
	bool isLeaf;
	bool splitTheoretical;    // split at theoretical optimum instead of the median
	double balance;           // radial split weighting in (0,1), used by theoretical split
	index ID;                 // cell id, assigned by indexSubtree()
	double lowerBoundR;       // smallest radius seen in this subtree, used to prune queries

public:
	std::vector<QuadNodePolarEuclid> children;

	QuadNodePolarEuclid() {
		//This should never be called; nodes are built with the full constructor.
		leftAngle = 0;
		rightAngle = 0;
		minR = 0;
		maxR = 0;
		capacity = 20;
		isLeaf = true;
		subTreeSize = 0;
		balance = 0.5;
		splitTheoretical = false;
		lowerBoundR = maxR;
		ID = 0;
	}

	/**
	 * Construct a QuadNode for polar coordinates.
	 *
	 * @param leftAngle Minimal angular coordinate of region, in radians from 0 to 2\pi
	 * @param minR Minimal radial coordinate of region, between 0 and 1
	 * @param rightAngle Maximal angular coordinate of region, in radians from 0 to 2\pi
	 * @param maxR Maximal radial coordinate of region, between 0 and 1
	 * @param capacity Number of points a leaf cell can store before splitting
	 * @param splitTheoretical Whether to split at the theoretically optimal
	 *        position or at the median of the stored points
	 * @param balance Radial weighting of the theoretical split, in (0,1)
	 *
	 * @throws std::runtime_error if balance is not strictly between 0 and 1
	 */
	QuadNodePolarEuclid(double leftAngle, double minR, double rightAngle, double maxR, unsigned capacity = 1000, bool splitTheoretical = false, double balance = 0.5) {
		if (balance <= 0 || balance >= 1) throw std::runtime_error("Quadtree balance parameter must be between 0 and 1.");
		this->leftAngle = leftAngle;
		this->minR = minR;
		this->maxR = maxR;
		this->rightAngle = rightAngle;
		// cache the four Cartesian corners for distance computations
		this->a = HyperbolicSpace::polarToCartesian(leftAngle, minR);
		this->b = HyperbolicSpace::polarToCartesian(rightAngle, minR);
		this->c = HyperbolicSpace::polarToCartesian(rightAngle, maxR);
		this->d = HyperbolicSpace::polarToCartesian(leftAngle, maxR);
		this->capacity = capacity;
		this->splitTheoretical = splitTheoretical;
		this->balance = balance;
		this->lowerBoundR = maxR;
		this->ID = 0;
		isLeaf = true;
		subTreeSize = 0;
	}

	/**
	 * Split this leaf into four children (2x2 in angle x radius) and mark it
	 * as an inner node. The stored points are NOT redistributed here; the
	 * caller (addContent) re-inserts them afterwards.
	 */
	void split() {
		assert(isLeaf);
		//heavy lifting: split up!
		double middleAngle, middleR;
		if (splitTheoretical) {
			//split at the analytically balanced position
			middleAngle = (rightAngle - leftAngle) / 2 + leftAngle;
			// radius chosen so the two annuli have area ratio balance : (1-balance)
			middleR = pow(maxR*maxR*(1-balance)+minR*minR*balance, 0.5);
		} else {
			//split at the median of the currently stored points
			vector<double> sortedAngles = angles;
			std::sort(sortedAngles.begin(), sortedAngles.end());
			middleAngle = sortedAngles[sortedAngles.size()/2];
			vector<double> sortedRadii = radii;
			std::sort(sortedRadii.begin(), sortedRadii.end());
			middleR = sortedRadii[sortedRadii.size()/2];
		}
		// NOTE(review): with a median split and many duplicate radii, the
		// median may coincide with a boundary and trip these asserts — confirm
		// the caller guarantees distinct coordinates.
		assert(middleR < maxR);
		assert(middleR > minR);

		QuadNodePolarEuclid southwest(leftAngle, minR, middleAngle, middleR, capacity, splitTheoretical, balance);
		QuadNodePolarEuclid southeast(middleAngle, minR, rightAngle, middleR, capacity, splitTheoretical, balance);
		QuadNodePolarEuclid northwest(leftAngle, middleR, middleAngle, maxR, capacity, splitTheoretical, balance);
		QuadNodePolarEuclid northeast(middleAngle, middleR, rightAngle, maxR, capacity, splitTheoretical, balance);
		children = {southwest, southeast, northwest, northeast};
		isLeaf = false;
	}

	/**
	 * Add a point at polar coordinates (angle, R) with content input. May split node if capacity is full
	 *
	 * @param input arbitrary content, in our case an index
	 * @param angle angular coordinate of point, between 0 and 2 pi.
	 * @param R radial coordinate of point, between 0 and 1.
*/ void addContent(T input, double angle, double R) { assert(this->responsible(angle, R)); if (lowerBoundR > R) lowerBoundR = R; if (isLeaf) { if (content.size() + 1 < capacity) { content.push_back(input); angles.push_back(angle); radii.push_back(R); Point2D<double> pos = HyperbolicSpace::polarToCartesian(angle, R); positions.push_back(pos); } else { split(); for (index i = 0; i < content.size(); i++) { this->addContent(content[i], angles[i], radii[i]); } assert(subTreeSize == content.size());//we have added everything twice subTreeSize = content.size(); content.clear(); angles.clear(); radii.clear(); positions.clear(); this->addContent(input, angle, R); } } else { assert(children.size() > 0); for (index i = 0; i < children.size(); i++) { if (children[i].responsible(angle, R)) { children[i].addContent(input, angle, R); break; } } subTreeSize++; } } /** * Remove content at polar coordinates (angle, R). May cause coarsening of the quadtree * * @param input Content to be removed * @param angle Angular coordinate * @param R Radial coordinate * * @return True if content was found and removed, false otherwise */ bool removeContent(T input, double angle, double R) { if (!responsible(angle, R)) return false; if (isLeaf) { index i = 0; for (; i < content.size(); i++) { if (content[i] == input) break; } if (i < content.size()) { assert(angles[i] == angle); assert(radii[i] == R); //remove element content.erase(content.begin()+i); positions.erase(positions.begin()+i); angles.erase(angles.begin()+i); radii.erase(radii.begin()+i); return true; } else { return false; } } else { bool removed = false; bool allLeaves = true; assert(children.size() > 0); for (index i = 0; i < children.size(); i++) { if (!children[i].isLeaf) allLeaves = false; if (children[i].removeContent(input, angle, R)) { assert(!removed); removed = true; } } if (removed) subTreeSize--; //coarsen? if (removed && allLeaves && size() < coarsenLimit) { //coarsen!! 
//why not assert empty containers and then insert directly? vector<T> allContent; vector<Point2D<double> > allPositions; vector<double> allAngles; vector<double> allRadii; for (index i = 0; i < children.size(); i++) { allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end()); allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end()); allAngles.insert(allAngles.end(), children[i].angles.begin(), children[i].angles.end()); allRadii.insert(allRadii.end(), children[i].radii.begin(), children[i].radii.end()); } assert(subTreeSize == allContent.size()); assert(subTreeSize == allPositions.size()); assert(subTreeSize == allAngles.size()); assert(subTreeSize == allRadii.size()); children.clear(); content.swap(allContent); positions.swap(allPositions); angles.swap(allAngles); radii.swap(allRadii); isLeaf = true; } return removed; } } /** * Check whether the region managed by this node lies outside of an Euclidean circle. * * @param query Center of the Euclidean query circle, given in Cartesian coordinates * @param radius Radius of the Euclidean query circle * * @return True if the region managed by this node lies completely outside of the circle */ bool outOfReach(Point2D<double> query, double radius) const { double phi, r; HyperbolicSpace::cartesianToPolar(query, phi, r); if (responsible(phi, r)) return false; //get four edge points double topDistance, bottomDistance, leftDistance, rightDistance; if (phi < leftAngle || phi > rightAngle) { topDistance = min(c.distance(query), d.distance(query)); } else { topDistance = abs(r - maxR); } if (topDistance <= radius) return false; if (phi < leftAngle || phi > rightAngle) { bottomDistance = min(a.distance(query), b.distance(query)); } else { bottomDistance = abs(r - minR); } if (bottomDistance <= radius) return false; double minDistanceR = r*cos(abs(phi-leftAngle)); if (minDistanceR > minR && minDistanceR < maxR) { leftDistance = 
query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR)); } else { leftDistance = min(a.distance(query), d.distance(query)); } if (leftDistance <= radius) return false; minDistanceR = r*cos(abs(phi-rightAngle)); if (minDistanceR > minR && minDistanceR < maxR) { rightDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR)); } else { rightDistance = min(b.distance(query), c.distance(query)); } if (rightDistance <= radius) return false; return true; } /** * Check whether the region managed by this node lies outside of an Euclidean circle. * Functionality is the same as in the method above, but it takes polar coordinates instead of Cartesian ones * * @param angle_c Angular coordinate of the Euclidean query circle's center * @param r_c Radial coordinate of the Euclidean query circle's center * @param radius Radius of the Euclidean query circle * * @return True if the region managed by this node lies completely outside of the circle */ bool outOfReach(double angle_c, double r_c, double radius) const { if (responsible(angle_c, r_c)) return false; Point2D<double> query = HyperbolicSpace::polarToCartesian(angle_c, r_c); return outOfReach(query, radius); } /** * @param phi Angular coordinate of query point * @param r_h radial coordinate of query point */ std::pair<double, double> EuclideanDistances(double phi, double r) const { /** * If the query point is not within the quadnode, the distance minimum is on the border. * Need to check whether extremum is between corners. 
*/ double maxDistance = 0; double minDistance = std::numeric_limits<double>::max(); if (responsible(phi, r)) minDistance = 0; auto euclidDistancePolar = [](double phi_a, double r_a, double phi_b, double r_b){ return pow(r_a*r_a+r_b*r_b-2*r_a*r_b*cos(phi_a-phi_b), 0.5); }; auto updateMinMax = [&minDistance, &maxDistance, phi, r, euclidDistancePolar](double phi_b, double r_b){ double extremalValue = euclidDistancePolar(phi, r, phi_b, r_b); //assert(extremalValue <= r + r_b); maxDistance = std::max(extremalValue, maxDistance); minDistance = std::min(minDistance, extremalValue); }; /** * angular boundaries */ //left double extremum = r*cos(this->leftAngle - phi); if (extremum < maxR && extremum > minR) { updateMinMax(this->leftAngle, extremum); } //right extremum = r*cos(this->rightAngle - phi); if (extremum < maxR && extremum > minR) { updateMinMax(this->leftAngle, extremum); } /** * radial boundaries. */ if (phi > leftAngle && phi < rightAngle) { updateMinMax(phi, maxR); updateMinMax(phi, minR); } if (phi + PI > leftAngle && phi + PI < rightAngle) { updateMinMax(phi + PI, maxR); updateMinMax(phi + PI, minR); } if (phi - PI > leftAngle && phi -PI < rightAngle) { updateMinMax(phi - PI, maxR); updateMinMax(phi - PI, minR); } /** * corners */ updateMinMax(leftAngle, maxR); updateMinMax(rightAngle, maxR); updateMinMax(leftAngle, minR); updateMinMax(rightAngle, minR); //double shortCutGainMax = maxR + r - maxDistance; //assert(minDistance <= minR + r); //assert(maxDistance <= maxR + r); assert(minDistance < maxDistance); return std::pair<double, double>(minDistance, maxDistance); } /** * Does the point at (angle, r) fall inside the region managed by this QuadNode? 
* * @param angle Angular coordinate of input point * @param r Radial coordinate of input points * * @return True if input point lies within the region of this QuadNode */ bool responsible(double angle, double r) const { return (angle >= leftAngle && angle < rightAngle && r >= minR && r < maxR); } /** * Get all Elements in this QuadNode or a descendant of it * * @return vector of content type T */ std::vector<T> getElements() const { if (isLeaf) { return content; } else { assert(content.size() == 0); assert(angles.size() == 0); assert(radii.size() == 0); vector<T> result; for (index i = 0; i < children.size(); i++) { std::vector<T> subresult = children[i].getElements(); result.insert(result.end(), subresult.begin(), subresult.end()); } return result; } } void getCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const { assert(angles.size() == radii.size()); if (isLeaf) { anglesContainer.insert(anglesContainer.end(), angles.begin(), angles.end()); radiiContainer.insert(radiiContainer.end(), radii.begin(), radii.end()); } else { assert(content.size() == 0); assert(angles.size() == 0); assert(radii.size() == 0); for (index i = 0; i < children.size(); i++) { children[i].getCoordinates(anglesContainer, radiiContainer); } } } /** * Main query method, get points lying in a Euclidean circle around the center point. * Optional limits can be given to get a different result or to reduce unnecessary comparisons * * Elements are pushed onto a vector which is a required argument. 
This is done to reduce copying * * Safe to call in parallel if diagnostics are disabled * * @param center Center of the query circle * @param radius Radius of the query circle * @param result Reference to the vector where the results will be stored * @param minAngle Optional value for the minimum angular coordinate of the query region * @param maxAngle Optional value for the maximum angular coordinate of the query region * @param lowR Optional value for the minimum radial coordinate of the query region * @param highR Optional value for the maximum radial coordinate of the query region */ void getElementsInEuclideanCircle(Point2D<double> center, double radius, vector<T> &result, double minAngle=0, double maxAngle=2*PI, double lowR=0, double highR = 1) const { if (minAngle >= rightAngle || maxAngle <= leftAngle || lowR >= maxR || highR < lowerBoundR) return; if (outOfReach(center, radius)) { return; } if (isLeaf) { const double rsq = radius*radius; const double queryX = center[0]; const double queryY = center[1]; const count cSize = content.size(); for (int i=0; i < cSize; i++) { const double deltaX = positions[i].getX() - queryX; const double deltaY = positions[i].getY() - queryY; if (deltaX*deltaX + deltaY*deltaY < rsq) { result.push_back(content[i]); } } } else { for (index i = 0; i < children.size(); i++) { children[i].getElementsInEuclideanCircle(center, radius, result, minAngle, maxAngle, lowR, highR); } } } count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &result) const { double phi_q, r_q; HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q); if (suppressLeft && phi_q > rightAngle) return 0; TRACE("Getting Euclidean distances"); auto distancePair = EuclideanDistances(phi_q, r_q); double probUB = prob(distancePair.first); double probLB = prob(distancePair.second); assert(probLB <= probUB); if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in 
calculating expensive jumps if (probUB == 0) return 0; //TODO: return whole if probLB == 1 double probdenom = std::log(1-probUB); if (probdenom == 0) { DEBUG(probUB, " not zero, but too small too process. Ignoring."); return 0; } TRACE("probUB: ", probUB, ", probdenom: ", probdenom); count expectedNeighbours = probUB*size(); count candidatesTested = 0; if (isLeaf) { const count lsize = content.size(); TRACE("Leaf of size ", lsize); for (index i = 0; i < lsize; i++) { //jump! if (probUB < 1) { double random = Aux::Random::real(); double delta = std::log(random) / probdenom; assert(delta == delta); assert(delta >= 0); i += delta; if (i >= lsize) break; TRACE("Jumped with delta ", delta, " arrived at ", i); } assert(i >= 0); //see where we've arrived candidatesTested++; double distance = positions[i].distance(euQuery); double q = prob(distance); q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities assert(q <= 1); assert(q >= 0); //accept? double acc = Aux::Random::real(); if (acc < q) { TRACE("Accepted node ", i, " with probability ", q, "."); result.push_back(content[i]); } } } else { if (expectedNeighbours < 4 || probUB < 1/1000) {//select candidates directly instead of calling recursively TRACE("probUB = ", probUB, ", switching to direct candidate selection."); assert(probUB < 1); const count stsize = size(); for (index i = 0; i < stsize; i++) { double delta = std::log(Aux::Random::real()) / probdenom; assert(delta >= 0); i += delta; TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement."); if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. 
As of now, the offset is subtracted separately for each point else break; candidatesTested++; } } else {//carry on as normal for (index i = 0; i < children.size(); i++) { TRACE("Recursively calling child ", i); candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, suppressLeft, result); } } } //DEBUG("Expected at most ", expectedNeighbours, " neighbours, got ", result.size() - offset); return candidatesTested; } void maybeGetKthElement(double upperBound, Point2D<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const { TRACE("Maybe get element ", k, " with upper Bound ", upperBound); assert(k < size()); if (isLeaf) { double acceptance = prob(euQuery.distance(positions[k]))/upperBound; TRACE("Is leaf, accept with ", acceptance); if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]); } else { TRACE("Call recursively."); index offset = 0; for (index i = 0; i < children.size(); i++) { count childsize = children[i].size(); if (k - offset < childsize) { children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens); break; } offset += childsize; } } } /** * Shrink all vectors in this subtree to fit the content. * Call after quadtree construction is complete, causes better memory usage and cache efficiency */ void trim() { content.shrink_to_fit(); positions.shrink_to_fit(); angles.shrink_to_fit(); radii.shrink_to_fit(); if (!isLeaf) { for (index i = 0; i < children.size(); i++) { children[i].trim(); } } } /** * Number of points lying in the region managed by this QuadNode */ count size() const { return isLeaf ? 
content.size() : subTreeSize; } void recount() { subTreeSize = 0; for (index i = 0; i < children.size(); i++) { children[i].recount(); subTreeSize += children[i].size(); } } /** * Height of subtree hanging from this QuadNode */ count height() const { count result = 1;//if leaf node, the children loop will not execute for (auto child : children) result = std::max(result, child.height()+1); return result; } /** * Leaf cells in the subtree hanging from this QuadNode */ count countLeaves() const { if (isLeaf) return 1; count result = 0; for (index i = 0; i < children.size(); i++) { result += children[i].countLeaves(); } return result; } double getLeftAngle() const { return leftAngle; } double getRightAngle() const { return rightAngle; } double getMinR() const { return minR; } double getMaxR() const { return maxR; } index getID() const { return ID; } index indexSubtree(index nextID) { index result = nextID; assert(children.size() == 4 || children.size() == 0); for (int i = 0; i < children.size(); i++) { result = children[i].indexSubtree(result); } this->ID = result; return result+1; } index getCellID(double phi, double r) const { if (!responsible(phi, r)) return NetworKit::none; if (isLeaf) return getID(); else { for (int i = 0; i < children.size(); i++) { index childresult = children[i].getCellID(phi, r); if (childresult != NetworKit::none) return childresult; } throw std::runtime_error("No responsible child node found even though this node is responsible."); } } index getMaxIDInSubtree() const { if (isLeaf) return getID(); else { index result = -1; for (int i = 0; i < 4; i++) { result = std::max(children[i].getMaxIDInSubtree(), result); } return std::max(result, getID()); } } count reindex(count offset) { if (isLeaf) { #ifndef NETWORKIT_OMP2 #pragma omp task #endif { index p = offset; std::generate(content.begin(), content.end(), [&p](){return p++;}); } offset += size(); } else { for (int i = 0; i < 4; i++) { offset = children[i].reindex(offset); } } return offset; } 
};

} /* namespace NetworKit */

#endif /* QUADNODEPOLAREUCLID_H_ */
Renderer.h
#pragma once #include <Utils.h> #include <Player.h> #include <Raycaster.h> class Renderer { public: Renderer(Player* player, Field* field, Raycaster* raycaster, bool useMP, bool skipPixels) : player(player), field(field), raycaster(raycaster), useMP(useMP), skipPixels(skipPixels) {} void FillPixel(glm::vec4& pos, glm::vec4& v, uint8_t* buffer, int index) { glm::u8vec3 pixel(250, 250, 250); float dist; raycaster->FindPixel(pos, v, pixel, dist); //// distance fog //static const int FOG_RANGE = 30; //static const int FOG_STRENGH = 120; // 0 - 255 //int t = std::min(int(dist * FOG_RANGE), FOG_STRENGH); //pixel.x = std::min(t + pixel.x, 255); //pixel.y = std::min(t + pixel.y, 255); //pixel.z = std::min(t + pixel.z, 255); buffer[index * 4 ] = pixel.x; buffer[index * 4 + 1] = pixel.y; buffer[index * 4 + 2] = pixel.z; buffer[index * 4 + 3] = 255; } void FillPixelAtXY(uint8_t* buffer, const int x, const int y, const int viewWidth, const int viewHeight, const int skipEven) { int index = y*viewWidth + x; int W2 = viewWidth / 2; int H2 = viewHeight / 2; if (skipPixels) if (y % 2 == 0) if (x % 2 == skipEven) return; else if (x % 2 == (skipEven == 0 ? 
1 : 0)) return; float dY = (float(y - H2) / W2); float dX = (float(x - W2) / W2); glm::vec4 rayDy = player->vy * dY; glm::vec4 rayDx = player->vz * dX; glm::vec4 raycastVec = player->vx + rayDy + rayDx; FillPixel(player->pos, raycastVec, buffer, index); } void ThreadedCycle(uint8_t* buffer, const int viewWidth, const int viewHeight, const int skipEven) { #pragma omp parallel for for (int x = 0; x < viewWidth; x++) for (int y = 0; y < viewHeight; y++) { FillPixelAtXY(buffer, x, y, viewWidth, viewHeight, skipEven); } } void SimpleCycle(uint8_t* buffer, const int viewWidth, const int viewHeight, const int skipEven) { for (int x = 0; x < viewWidth; x++) for (int y = 0; y < viewHeight; y++) { FillPixelAtXY(buffer, x, y, viewWidth, viewHeight, skipEven); } } void FillTexData(uint8_t* buffer, const int viewWidth, const int viewHeight) { static int skipEven = 0; if (useMP) ThreadedCycle(buffer, viewWidth, viewHeight, skipEven); else SimpleCycle(buffer, viewWidth, viewHeight, skipEven); skipEven = skipEven == 0 ? 1 : 0; } bool useMP; bool skipPixels; Player* player = nullptr; Field* field = nullptr; Raycaster* raycaster = nullptr; };
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/resize.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
% */ MagickExport Image *AutoOrientImage(const Image *image, const OrientationType orientation,ExceptionInfo *exception) { Image *orient_image; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); orient_image=(Image *) NULL; switch(orientation) { case UndefinedOrientation: case TopLeftOrientation: default: { orient_image=CloneImage(image,0,0,MagickTrue,exception); break; } case TopRightOrientation: { orient_image=FlopImage(image,exception); break; } case BottomRightOrientation: { orient_image=RotateImage(image,180.0,exception); break; } case BottomLeftOrientation: { orient_image=FlipImage(image,exception); break; } case LeftTopOrientation: { orient_image=TransposeImage(image,exception); break; } case RightTopOrientation: { orient_image=RotateImage(image,90.0,exception); break; } case RightBottomOrientation: { orient_image=TransverseImage(image,exception); break; } case LeftBottomOrientation: { orient_image=RotateImage(image,270.0,exception); break; } } if (orient_image != (Image *) NULL) orient_image->orientation=TopLeftOrientation; return(orient_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info) % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(chop_info != (RectangleInfo *) NULL); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,chop_image,1,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict chop_indexes, *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { register const PixelPacket *restrict p; register IndexPacket *restrict chop_indexes, *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { *q=(*p); if (indexes != (IndexPacket *) NULL) { if (chop_indexes != (IndexPacket *) NULL) *chop_indexes++=GetPixelIndex(indexes+x); } q++; } p++; } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C M Y K I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. 
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  cmyk_images=NewImageList();
  /*
    Each output image consumes four consecutive input planes (C, M, Y, K).
  */
  for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
  {
    cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace);
    /*
      Cyan plane: inverted intensity of the first image -> red channel.
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Magenta plane: inverted intensity of the second image -> green channel.
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Yellow plane: inverted intensity of the third image -> blue channel.
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /*
      Black plane: inverted intensity of the fourth image -> index channel.
    */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *restrict p;

      register IndexPacket
        *restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
          GetPixelIntensity(images,p)));
        p++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    AppendImageToList(&cmyk_images,cmyk_image);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
  }
  return(cmyk_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImage() extracts a region of the image starting at the offset defined
%  by geometry.  Region must be fully defined, and no special handling of
%  geometry flags is performed.
%
%  The format of the CropImage method is:
%
%      Image *CropImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to crop with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(crop_image); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width)) 
page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. */ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) || ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,crop_image,1,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict crop_indexes; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view); (void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (crop_indexes != (IndexPacket *) NULL)) (void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns* sizeof(*crop_indexes)); if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CropImage) #endif proceed=SetImageProgress(image,CropImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%         const RectangleInfo *crop_geometry, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.  When the fraction is exactly
    one half, the strict '<' makes this round up (toward ceil(x)).
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).  geometry.width/height give the tile
        counts; delta.x/y are the (floating point) tile sizes.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;  /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      /*
        Partial edge tiles may raise warnings in CropImage; discard them so
        a successful tiling returns with a clean exception.
      */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    Image is smaller than the requested tile: return a simple clone.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.  Unlike CropImage, no virtual-canvas clipping is
    applied: the region is read directly via the virtual pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict excerpt_indexes,
      *restrict indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  /*
    Allocate extent image: a background-colored canvas of the requested size
    with the original composited at the negated geometry offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(extent_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&extent_image->exception);
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  if (extent_image->background_color.opacity != OpaqueOpacity)
    extent_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(extent_image);
  (void) CompositeImage(extent_image,image->compose,image,-geometry->x,
    -geometry->y);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is copied verbatim to destination row
    rows-y-1 (pixels and index channel alike).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flip_image,1,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flip_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if (indexes != (const IndexPacket *) NULL)
      {
        flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
        if (flip_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
            sizeof(*flip_indexes));
      }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* Mirror the virtual-canvas y offset as well. */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: pixels are written right-to-left by walking q backwards
    from one past the row's end while p walks forward.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flop_image,1,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict flop_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    /* NOTE(review): cast here drops const, unlike the sibling checks --
       cosmetic inconsistency only. */
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    q+=flop_image->columns;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      (*--q)=(*p++);
      if ((indexes != (const IndexPacket *) NULL) &&
          (flop_indexes != (IndexPacket *) NULL))
        SetPixelIndex(flop_indexes+flop_image->columns-x-1,
          GetPixelIndex(indexes+x));
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* Mirror the virtual-canvas x offset as well. */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g
e                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  CopyImageRegion(): copy a columns x rows rectangle from (sx,sy) in source
  to (dx,dy) in destination, row by row, including the index channel when
  present.  Returns MagickTrue on success; a zero-width region is a no-op.
*/
static inline MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,
  const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy,
  ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict destination_indexes;

    register PixelPacket
      *restrict q;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source_view);
    (void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
    if (indexes != (IndexPacket *) NULL)
      {
        destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
        if (destination_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(destination_indexes,indexes,(size_t)
            columns*sizeof(*indexes));
      }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0,columns) and [0,rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the circular shift is realized as four rectangular region
    copies (the four quadrants split at the normalized offset).
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  /*
    Shave is implemented as a centered CropImage: strip shave_info->width
    columns from each side and shave_info->height rows from top and bottom,
    then shrink the virtual canvas (page) to match.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
%
%  The format of the SpliceImage method is:
%
%      Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to splice with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse) { InheritException(exception,&splice_image->exception); splice_image=DestroyImage(splice_image); return((Image *) NULL); } (void) SetImageBackgroundColor(splice_image); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case StaticGravity: case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes, *restrict splice_indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < splice_geometry.x; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical 
(MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes, *restrict splice_indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, image->columns,1,exception); if ((y < 0) || (y >= (ssize_t) splice_image->rows)) continue; q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < splice_geometry.x; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; 
if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ /* DANGER: This function destroys what it assumes to be a single image list. If the input image is part of a larger list, all other images in that list will be simply 'lost', not destroyed. Also if the crop generates a list of images only the first image is resized. And finally if the crop succeeds and the resize failed, you will get a cropped image, as well as a 'false' or 'failed' report. 
This function and should probably be depreciated in favor of direct calls to CropImageToTiles() or ResizeImage(), as appropriate. */ MagickExport MagickBooleanType TransformImage(Image **image, const char *crop_geometry,const char *image_geometry) { Image *resize_image, *transform_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image **) NULL); assert((*image)->signature == MagickSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); transform_image=(*image); if (crop_geometry != (const char *) NULL) { Image *crop_image; /* Crop image to a user specified size. */ crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception); if (crop_image == (Image *) NULL) transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception); else { transform_image=DestroyImage(transform_image); transform_image=GetFirstImageInList(crop_image); } *image=transform_image; } if (image_geometry == (const char *) NULL) return(MagickTrue); /* Scale image to a user specified size. */ flags=ParseRegionGeometry(transform_image,image_geometry,&geometry, &(*image)->exception); (void) flags; if ((transform_image->columns == geometry.width) && (transform_image->rows == geometry.height)) return(MagickTrue); resize_image=ResizeImage(transform_image,geometry.width,geometry.height, transform_image->filter,transform_image->blur,&(*image)->exception); if (resize_image == (Image *) NULL) return(MagickFalse); transform_image=DestroyImage(transform_image); transform_image=resize_image; *image=transform_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImages() calls TransformImage() on each image of a sequence. 
% % The format of the TransformImage method is: % % MagickBooleanType TransformImages(Image **image, % const char *crop_geometry,const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ MagickExport MagickBooleanType TransformImages(Image **images, const char *crop_geometry,const char *image_geometry) { Image *image, **image_list, *transform_images; MagickStatusType status; register ssize_t i; assert(images != (Image **) NULL); assert((*images)->signature == MagickSignature); if ((*images)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", (*images)->filename); image_list=ImageListToArray(*images,&(*images)->exception); if (image_list == (Image **) NULL) return(MagickFalse); status=MagickTrue; transform_images=NewImageList(); for (i=0; image_list[i] != (Image *) NULL; i++) { image=image_list[i]; status&=TransformImage(&image,crop_geometry,image_geometry); AppendImageToList(&transform_images,image); } *images=transform_images; image_list=(Image **) RelinquishMagickMemory(image_list); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p o s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransposeImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis while rotating them by 90 degrees. % % The format of the TransposeImage method is: % % Image *TransposeImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */

/*
  TransposeImage(): the result is image->rows wide and image->columns tall;
  source row y is written as destination column (image->rows-y-1), copying
  the row's pixels (and colormap/CMYK indexes, when present) verbatim.
  Returns a new image, or NULL on failure.
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* note swapped dimensions: rows x columns */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict transpose_indexes,
      *restrict indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    /* read source row (rows-y-1); queue destination column (rows-y-1),
       which is 1 pixel wide and transpose_image->rows pixels tall */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* both buffers hold image->columns pixels, so a flat copy suffices */
    (void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
        if (transpose_indexes != (IndexPacket *) NULL)
          (void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
            image->columns*sizeof(*transpose_indexes));
      }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransposeImage)
#endif
        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* swap the page geometry to match the transposed canvas */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  TransverseImage(): the result is image->rows wide and image->columns tall;
  source row y is written, pixel order reversed, into destination column
  (image->rows-y-1).  Colormap/CMYK indexes are reversed the same way.
  Returns a new image, or NULL on failure.
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* note swapped dimensions: rows x columns */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict transverse_indexes,
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    /* read source row y; queue destination column (rows-y-1), 1 pixel wide
       and transverse_image->rows pixels tall */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
      1),0,1,transverse_image->rows,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* copy the row into the column in reverse pixel order */
    q+=image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
      *--q=(*p++);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
        if (transverse_indexes != (IndexPacket *) NULL)
          for (x=0; x < (ssize_t) image->columns; x++)
            SetPixelIndex(transverse_indexes+image->columns-x-1,
              GetPixelIndex(indexes+x));
      }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* swap the page geometry and mirror the offsets within the page */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception) { RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); geometry=GetImageBoundingBox(image,exception); if ((geometry.width == 0) || (geometry.height == 0)) { Image *crop_image; crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.opacity=(Quantum) TransparentOpacity; (void) SetImageBackgroundColor(crop_image); crop_image->page=image->page; crop_image->page.x=(-1); crop_image->page.y=(-1); return(crop_image); } geometry.x+=image->page.x; geometry.y+=image->page.y; return(CropImage(image,&geometry,exception)); }
GB_binop__minus_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_int16)
// A*D function (colscale):         GB (_AxD__minus_int16)
// D*A function (rowscale):         GB (_DxB__minus_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_int16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_int16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_int16)
// C=scalar+B                       GB (_bind1st__minus_int16)
// C=scalar+B'                      GB (_bind1st_tran__minus_int16)
// C=A+scalar                       GB (_bind2nd__minus_int16)
// C=A'+scalar                      GB (_bind2nd_tran__minus_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij - bij)

// The macros below parameterize the shared kernel templates (included by the
// functions that follow) for this specific operator/type combination.

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE (review): each function below is a thin wrapper whose numeric work is
// supplied by the #include'd kernel template, specialized via the macros
// defined earlier in this file.  Bodies are left exactly as generated.

void GB (_Cdense_ewise3_accum__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap, if present
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap, if present
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__log10_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log10_fc64_fc64)
// op(A') function:  GB (_unop_tran__log10_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_clog10 (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex base-10 logarithm
#define GB_OP(z, x) \
    z = GB_clog10 (x) ;

// casting (identity here: A and C are both GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
// (consumed by the shared transpose template GB_unop_transpose.c)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = GB_clog10 (z) ;       \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG10 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log10_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    { 
        // A is sparse, hypersparse, or full: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog10 (z) ;
        }
    }
    else
    { 
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog10 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log10_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel expands via the GB_CAST_OP macro above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
random.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#include <string.h>
#include <omp.h>

#ifndef TOTAL_SEATS
#define TOTAL_SEATS 20
#endif

#ifndef TOTAL_THREADS
#define TOTAL_THREADS 4
#endif

/*
 * Pick a uniformly random seat index in [0, TOTAL_SEATS).
 *
 * NOTE: rand() is not thread-safe in general; every call below happens in a
 * master region (before the barrier) or inside a critical section, so the
 * calls are serialized and this remains safe.
 */
int randomAssignment(void)
{
    return rand() % TOTAL_SEATS;
}

/*
 * Simulation of the "lost boarding pass" problem: person 0 sits in a random
 * seat; every later person takes their own seat if free, otherwise a random
 * free seat.  Prints the percentage of correctly/incorrectly seated people.
 */
int main(int argc, char **argv)
{
    (void) argc;
    (void) argv;

    omp_set_num_threads(TOTAL_THREADS);
    srand((unsigned) time(NULL));

    bool seats[TOTAL_SEATS];
    /* fix: size the memset from the array itself, not the element count
     * (correct before only because sizeof(bool) == 1) */
    memset(seats, false, sizeof seats);
    int wrongly_assigned = 0;

    #pragma omp parallel
    {
        /* Person 0 has lost their ticket and sits somewhere at random.
         * Only the master thread runs this, before the barrier, so the
         * unprotected update of wrongly_assigned is race-free. */
        #pragma omp master
        {
            int seatAssigned = randomAssignment();
            if (seatAssigned != 0)
            {
                wrongly_assigned++;
            }
            seats[seatAssigned] = true;
        }
        /* Everyone waits until person 0 is seated. */
        #pragma omp barrier
        /* fix: dropped the "ordered" clause -- no "#pragma omp ordered"
         * region exists in the loop, so the clause only constrained the
         * schedule for nothing */
        #pragma omp for
        for (int i = 1; i < TOTAL_SEATS; i++)
        {
            /* The whole seat lookup/claim must be atomic with respect to
             * other people choosing seats, hence the critical section. */
            #pragma omp critical
            {
                if (!seats[i])
                {
                    /* own seat still free: sit correctly */
                    seats[i] = true;
                }
                else
                {
                    /* fix: seatAssigned scoped to the only branch using it
                     * (was declared uninitialized in the loop body) */
                    int seatAssigned = randomAssignment();
                    while (seats[seatAssigned])
                    {
                        seatAssigned = randomAssignment();
                    }
                    printf("Person %d assigned %d\n", i, seatAssigned);
                    seats[seatAssigned] = true;
                    wrongly_assigned++;
                }
            }
        }
    }

    float correctly_seated =
        100 * (TOTAL_SEATS - wrongly_assigned) / (float)TOTAL_SEATS;
    float incorrectly_seated = 100 - correctly_seated;
    printf("Correctly : %f\n", correctly_seated);
    printf("Incorrectly : %f\n", incorrectly_seated);
    return 0;
}
pi.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Estimate PI by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * parallelized with an OpenMP sum reduction.  An optional command-line
 * argument overrides the default number of slices.
 *
 * Returns 0 on success, EXIT_FAILURE on an invalid slice-count argument.
 */
int main( int argc, char **argv )
{
    long num_steps = 1000000000;
    int num_threads = omp_get_max_threads();

    if (argc > 1)
    {
        /* fix: atol() silently returns 0 on non-numeric input, which made
         * step = 1.0/0 and produced a meaningless result -- parse with
         * strtol and validate instead */
        char *end = NULL;
        long requested = strtol(argv[1], &end, 10);
        if (end == argv[1] || *end != '\0' || requested <= 0)
        {
            fprintf(stderr, "invalid slice count: %s\n", argv[1]);
            return EXIT_FAILURE;
        }
        num_steps = requested;
    }

    printf("Calculating PI using:\n"
           " %ld slices\n"
           " %d thread(s)\n", num_steps, num_threads);

    double start = omp_get_wtime();
    double sum = 0.0;
    double step = 1.0 / num_steps;

    long i;
    /* x moved into the loop body, so no private(x) clause is needed;
     * the loop index of a parallel for is private automatically */
    #pragma omp parallel for reduction(+:sum)
    for (i = 0; i < num_steps; i++)
    {
        double x = (i + 0.5) * step;    /* midpoint of slice i */
        sum += 4.0 / (1.0 + x*x);
    }

    double pi = sum * step;
    double stop = omp_get_wtime();
    double taken = (double)(stop - start);

    printf("Obtained value for PI: %.16g\n"
           "Time taken: %.16g seconds\n", pi, taken);
    return 0;
}
SpatialFullConvolutionMap.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialFullConvolutionMap.c"
#else

/* Forward pass of a "full" (transposed) 2-D convolution with a connection
 * table: output_ is resized to nOutputPlane x ((inH-1)*dH+kH) x
 * ((inW-1)*dW+kW), seeded with the bias, then each (input,output) pair
 * listed in connTable is convolved with its kernel slice of weight. */
void THNN_(SpatialFullConvolutionMap_updateOutput)(
  THNNState *state, THTensor *input, THTensor *output_, THTensor *weight,
  THTensor *bias, THTensor *connTable,
  int nInputPlane, int nOutputPlane, int dW, int dH)
{
  THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous");
  THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous");
  THArgCheck(
    weight != NULL && weight->nDimension == 3
    && connTable != NULL && connTable->size[0] == weight->size[0], 4,
    "3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
  );

  const int kH = (int)weight->size[1];
  const int kW = (int)weight->size[2];

  THArgCheck(input != NULL && input->nDimension == 3, 2, "3D tensor expected");
  THArgCheck(input->size[0] >= nInputPlane, 2, "invalid number of input planes");

  THTensor_(resize3d)(
    output_, nOutputPlane,
    (input->size[1] - 1) * dH + kH,
    (input->size[2] - 1) * dW + kW
  );

  /* contiguous */
  input = THTensor_(newContiguous)(input);
  THTensor* output = THTensor_(newContiguous)(output_);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *output_data = THTensor_(data)(output);
  real *weight_data = THTensor_(data)(weight);
  /* NOTE(review): the argcheck above allows bias == NULL, but bias is
   * dereferenced unconditionally here and below -- confirm callers always
   * pass a bias tensor */
  real *bias_data = THTensor_(data)(bias);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  const int64_t input_h = input->size[1];
  const int64_t input_w = input->size[2];
  const int64_t output_h = output->size[1];
  const int64_t output_w = output->size[2];
  const int64_t weight_h = weight->size[1];
  const int64_t weight_w = weight->size[2];

  int64_t p;
#pragma omp parallel for private(p)
  for (p = 0; p < nOutputPlane; p++)
  {
    /* add bias */
    real *ptr_output = output_data + p*output_w*output_h;
    int64_t j;
    int nweight;
    int64_t k;

    for (j = 0; j < output_h*output_w; j++)
      ptr_output[j] = bias_data[p];

    /* convolve all maps */
    nweight = connTable->size[0];
    for (k = 0; k < nweight; k++)
    {
      /* get offsets for input/output */
      int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
      int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;

      /* only the rows of connTable targeting this output plane contribute;
       * restricting to o == p keeps the parallel writes race-free */
      if (o == p)
      {
        THTensor_(fullConv2Dptr)(
          output_data + o*output_w*output_h, 1.0,
          input_data + i*input_w*input_h, input_h, input_w,
          weight_data + k*weight_w*weight_h, weight_h, weight_w,
          dH, dW
        );
      }
    }
  }

  /* clean up */
  THTensor_(free)(input);
  THTensor_(freeCopyTo)(output, output_);
}

/* Backward pass w.r.t. the input: gradInput_ is resized/zeroed to match
 * input, then each connTable entry feeding plane p cross-correlates its
 * gradOutput plane with its kernel slice. */
void THNN_(SpatialFullConvolutionMap_updateGradInput)(
  THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput_,
  THTensor *weight, THTensor *bias, THTensor *connTable,
  int nInputPlane, int nOutputPlane, int dW, int dH)
{
  THArgCheck(
    weight != NULL && weight->nDimension == 3
    && connTable != NULL && connTable->size[0] == weight->size[0], 5,
    "3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
  );

  /* contiguous */
  THTensor* gradInput = THTensor_(newContiguous)(gradInput_);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* Resize/Zero */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* get raw pointers */
  real *gradInput_data = THTensor_(data)(gradInput);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *weight_data = THTensor_(data)(weight);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  const int64_t input_h = input->size[1];
  const int64_t input_w = input->size[2];
  const int64_t output_h = gradOutput->size[1];
  const int64_t output_w = gradOutput->size[2];
  const int64_t kH = weight->size[1];
  const int64_t kW = weight->size[2];

  int64_t p;
#pragma omp parallel for private(p)
  for (p = 0; p < nInputPlane; p++)
  {
    int64_t k;
    /* backward all */
    int nkernel = connTable->size[0];
    for (k = 0; k < nkernel; k++)
    {
      int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
      int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;

      /* restricting to i == p keeps each thread writing only its plane */
      if (i == p)
      {
        /* gradient to input */
        THTensor_(validXCorr2Dptr)(
          gradInput_data + i*input_w*input_h, 1.0,
          gradOutput_data + o*output_w*output_h, output_h, output_w,
          weight_data + k*kW*kH, kH, kW,
          dH, dW
        );
      }
    }
  }

  /* clean up */
  THTensor_(freeCopyTo)(gradInput, gradInput_);
  THTensor_(free)(gradOutput);
}

/* Backward pass w.r.t. the parameters: accumulates scale * gradients into
 * gradWeight and gradBias.  Each connTable row owns a distinct kernel slice
 * of gradWeight, so the parallel loop over k is race-free. */
void THNN_(SpatialFullConvolutionMap_accGradParameters)(
  THNNState *state,
  THTensor *input,
  THTensor *gradOutput,
  THTensor *gradWeight,
  THTensor *gradBias,
  THTensor *connTable,
  int nInputPlane,
  int nOutputPlane,
  int dW, int dH,
  accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THArgCheck(
    gradWeight != NULL && gradWeight->nDimension == 3
    && connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
    "3D gradWeight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
  );

  /* contiguous */
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *gradWeight_data = THTensor_(data)(gradWeight);
  real *gradBias_data = THTensor_(data)(gradBias);

  /* and dims */
  const int64_t input_h = input->size[1];
  const int64_t input_w = input->size[2];
  const int64_t output_h = gradOutput->size[1];
  const int64_t output_w = gradOutput->size[2];
  const int64_t weight_h = gradWeight->size[1];
  const int64_t weight_w = gradWeight->size[2];

  /* gradients wrt bias */
  int64_t k;
#pragma omp parallel for private(k)
  for (k = 0; k < nOutputPlane; k++)
  {
    real *ptr_gradOutput = gradOutput_data + k*output_w*output_h;
    int64_t l;
    for (l = 0; l < output_h*output_w; l++)
      gradBias_data[k] += scale*ptr_gradOutput[l];
  }

  /* gradients wrt weight */
  int nkernel = connTable->size[0];
#pragma omp parallel for private(k)
  for (k = 0; k < nkernel; k++)
  {
    int o = (int)THTensor_(get2d)(connTable,k,1) - TH_INDEX_BASE;
    int i = (int)THTensor_(get2d)(connTable,k,0) - TH_INDEX_BASE;

    /* gradient to kernel */
    THTensor_(validXCorr2DRevptr)(
      gradWeight_data + k*weight_w*weight_h,
      scale,
      gradOutput_data + o*output_w*output_h, output_h, output_w,
      input_data + i*input_w*input_h, input_h, input_w,
      dH, dW
    );
  }

  /* clean up */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
}

#endif
GB_binop__land_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__land_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__land_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_uint8)
// A*D function (colscale):         GB (_AxD__land_uint8)
// D*A function (rowscale):         GB (_DxB__land_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_uint8)
// C=scalar+B                       GB (_bind1st__land_uint8)
// C=scalar+B'                      GB (_bind1st_tran__land_uint8)
// C=A+scalar                       GB (_bind2nd__land_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__land_uint8)

// C type:     uint8_t
// A type:     uint8_t
// A pattern?  0
// B type:     uint8_t
// B pattern?  0

// BinaryOp:   cij = ((aij != 0) && (bij != 0))

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: logical AND of the two values treated as booleans
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_UINT8 || GxB_NO_LAND_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LAND is not in that list, so this kernel is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__land_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: generator artifact -- unreachable (the block above returned)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    { 
        // eWiseUnion substitutes alpha/beta for entries missing in A/B
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LAND is commutative, so this branch is the one compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        // skip entries not present in the bitmap of B
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__land_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use of the transpose template
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) && (y != 0)) ; \
}

GrB_Info GB (_bind2nd_tran__land_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/

#include "magick/studio.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C o m b i n e I m a g e s                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CombineImages() combines one or more images into a single image.  The
%  grayscale value of the pixels of each image in the sequence is assigned in
%  order to the specified channels of the combined image.  The typical
%  ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
%  The format of the CombineImages method is:
%
%      Image *CombineImages(const Image *image,const ChannelType channel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
#define CombineImageTag  "Combine/Image"

  CacheView
    *combine_view;

  const Image
    *next;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the image are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->columns != image->columns) || (next->rows != image->rows))
      ThrowImageException(OptionError,"ImagesAreNotTheSameSize");
  }
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&combine_image->exception);
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  /* pick linear RGB for unity gamma, otherwise sRGB */
  if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
    {
      if (fabs(image->gamma-1.0) <= MagickEpsilon)
        (void) SetImageColorspace(combine_image,RGBColorspace);
      else
        (void) SetImageColorspace(combine_image,sRGBColorspace);
    }
  if ((channel & OpacityChannel) != 0)
    combine_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(combine_image);
  /*
    Combine images.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    PixelPacket
      *pixels;

    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    /* each requested channel consumes one image from the list, in order */
    if (((channel & RedChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        /* NOTE(review): this early continue skips DestroyCacheView on
           image_view (same pattern in each channel block below) -- verify
           the view is reclaimed elsewhere or this leaks */
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          /* NOTE(review): intensity is computed with image's properties
             while p comes from next -- confirm intentional */
          SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & GreenChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & BlueChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    /* black (K) channel only exists for CMYK images */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) && (next != (Image *) NULL))
      {
        IndexPacket
          *indexes;

        image_view=AcquireVirtualCacheView(next,exception);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewAuthenticIndexQueue(combine_view);
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p)));
          p++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (IsGrayColorspace(combine_image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(combine_image,sRGBColorspace);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e A l p h a C h a n n e l                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
%  not activated.  That is, the image is RGB rather than RGBA or CMYK rather
%  than CMYKA.
%
%  The format of the GetImageAlphaChannel method is:
%
%      MagickBooleanType GetImageAlphaChannel(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /* NOTE(review): the signature assert runs after image->debug has already
     been dereferenced; other methods in this file assert first — confirm
     whether the ordering here is intentional. */
  assert(image->signature == MagickCoreSignature);
  /* image->matte is MagickTrue only while the alpha channel is active. */
  return(image->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImageChannel() separates a channel from the image and returns it as
%  a grayscale image.  A channel is a particular color component of each pixel
%  in the image.
%
%  The format of the SeparateImageChannel method is:
%
%      MagickBooleanType SeparateImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: Identify which channel to extract: RedChannel, GreenChannel,
%      BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
%      YellowChannel, or BlackChannel.
%
*/

/*
  SeparateImage(): non-destructive wrapper around SeparateImageChannel().
  Clones the input, reduces the clone to the requested channel, and returns
  the clone (or NULL on failure).  Caller owns the returned image.
*/
MagickExport Image *SeparateImage(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *separate_image;

  MagickBooleanType
    status;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  /* On failure the clone is destroyed so no half-converted image escapes. */
  status=SeparateImageChannel(separate_image,channel);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}

MagickExport MagickBooleanType SeparateImageChannel(Image *image,
  const ChannelType channel)
{
#define SeparateImageTag  "Separate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* In-place conversion requires per-pixel storage. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* GrayChannels writes intensity into alpha below, so make the alpha
     channel fully opaque first. */
  if (channel == GrayChannels)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Separate image channels.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* A prior row failed: skip remaining rows (cannot break out of an
       OpenMP loop). */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    switch (channel)
    {
      case RedChannel:
      {
        /* Replicate red into green and blue -> gray image of the red data. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
        break;
      }
      case GreenChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelGreen(q));
          SetPixelBlue(q,GetPixelGreen(q));
          q++;
        }
        break;
      }
      case BlueChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelBlue(q));
          SetPixelGreen(q,GetPixelBlue(q));
          q++;
        }
        break;
      }
      case OpacityChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelOpacity(q));
          SetPixelGreen(q,GetPixelOpacity(q));
          SetPixelBlue(q,GetPixelOpacity(q));
          q++;
        }
        break;
      }
      case BlackChannel:
      {
        /* The index queue only carries meaningful data for PseudoClass or
           CMYK images; otherwise there is nothing to separate. */
        if ((image->storage_class != PseudoClass) &&
            (image->colorspace != CMYKColorspace))
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIndex(indexes+x));
          SetPixelGreen(q,GetPixelIndex(indexes+x));
          SetPixelBlue(q,GetPixelIndex(indexes+x));
          q++;
        }
        break;
      }
      case TrueAlphaChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelAlpha(q));
          SetPixelGreen(q,GetPixelAlpha(q));
          SetPixelBlue(q,GetPixelAlpha(q));
          q++;
        }
        break;
      }
      case GrayChannels:
      {
        /* Copy pixel intensity into the alpha channel; colors untouched. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q)));
          q++;
        }
        break;
      }
      default:
        break;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SeparateImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Result is grayscale except for the alpha-copy case, which leaves the
     colors (and matte flag) alone. */
  if (channel != GrayChannels)
    {
      image->matte=MagickFalse;
      (void) SetImageColorspace(image,GRAYColorspace);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImages() returns a separate grayscale image for each channel
%  specified.
%
%  The format of the SeparateImages method is:
%
%      MagickBooleanType SeparateImages(const Image *image,
%        const ChannelType channel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: Identify which channels to extract: RedChannel, GreenChannel,
%      BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
%      YellowChannel, or BlackChannel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  /*
    Build a list with one grayscale image per requested channel.  Each entry
    is a clone of the input reduced to a single channel; the black channel is
    emitted only for CMYK images, and AlphaChannel is extracted via
    TrueAlphaChannel.  The caller owns the returned list (may be empty).

    Fix: CloneImage() can fail (e.g. resource limits) and return NULL; the
    original passed that NULL straight to SeparateImageChannel() and
    AppendImageToList().  Each clone is now checked before use, with any
    error already recorded in `exception` by CloneImage().
  */
  Image
    *images,
    *separate_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if (((channel & BlackChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & AlphaChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,TrueAlphaChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e t I m a g e A l p h a C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
%  channel.
%
%  The format of the SetImageAlphaChannel method is:
%
%      MagickBooleanType SetImageAlphaChannel(Image *image,
%        const AlphaChannelType alpha_type)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha_type: The alpha channel type: ActivateAlphaChannel,
%      AssociateAlphaChannel, CopyAlphaChannel, Disassociate,
%      DeactivateAlphaChannel, ExtractAlphaChannel, OpaqueAlphaChannel,
%      ResetAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, and
%      TransparentAlphaChannel.
%
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelType alpha_type)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  exception=(&image->exception);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Just flip the flag; existing opacity data becomes live. */
      image->matte=MagickTrue;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: premultiply each color channel by its alpha, then
        mark the alpha channel inactive.
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          /* gamma is the normalized alpha in [0,1]. */
          gamma=QuantumScale*GetPixelAlpha(q);
          SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case BackgroundAlphaChannel:
    {
      IndexPacket
        index;

      /* NOTE(review): this `status` shadows the function-level one, so the
         outer status is never updated by this case; the case returns its
         local status directly instead of falling through. */
      MagickBooleanType
        status;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Set transparent pixels to background color.
      */
      if (image->matte == MagickFalse)
        break;
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Only fully transparent pixels are replaced. */
          if (q->opacity == TransparentOpacity)
            {
              SetPixelRed(q,pixel.red);
              SetPixelGreen(q,pixel.green);
              SetPixelBlue(q,pixel.blue);
            }
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      /* Early return: skips the final SyncImagePixelCache() below. */
      return(status);
    }
    case CopyAlphaChannel:
    case ShapeAlphaChannel:
    {
      /*
        Special usage case for SeparateImageChannel(): copy grayscale color
        to the alpha channel.
      */
      status=SeparateImageChannel(image,GrayChannels);
      image->matte=MagickTrue; /* make sure transparency is now on! */
      if (alpha_type == ShapeAlphaChannel)
        {
          MagickPixelPacket
            background;

          /*
            Reset all color channels to background color.
          */
          GetMagickPixelPacket(image,&background);
          SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *)
            NULL,&background);
          (void) LevelColorsImage(image,&background,&background,MagickTrue);
        }
      break;
    }
    case DeactivateAlphaChannel:
    {
      /* Keep the opacity data but stop honoring it. */
      image->matte=MagickFalse;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Undo premultiplication: divide colors by alpha (guarded against
        divide-by-zero via PerceptibleReciprocal), then drop the channel.
      */
      status=SetImageStorageClass(image,DirectClass);
      if (status == MagickFalse)
        break;
      image->matte=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            alpha,
            gamma;

          alpha=QuantumScale*GetPixelAlpha(q);
          gamma=PerceptibleReciprocal(alpha);
          SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q)));
          SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q)));
          SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q)));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->matte=MagickFalse;
      break;
    }
    case ExtractAlphaChannel:
    {
      /* Turn the alpha data into grayscale color, then disable alpha. */
      status=SeparateImageChannel(image,TrueAlphaChannel);
      image->matte=MagickFalse;
      break;
    }
    case RemoveAlphaChannel:
    case FlattenAlphaChannel:
    {
      IndexPacket
        index;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      /*
        Flatten image pixels over the background pixels.
      */
      if (image->matte == MagickFalse)
        break;
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      (void) memset(&pixel,0,sizeof(pixel));
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            opacity;

          /* Standard "over" composite of the pixel onto the background. */
          gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity;
          opacity=(double) QuantumRange*(1.0-gamma);
          gamma=PerceptibleReciprocal(gamma);
          q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red,
            (MagickRealType) q->opacity,(MagickRealType) pixel.red,
            (MagickRealType) pixel.opacity));
          q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green,
            (MagickRealType) q->opacity,(MagickRealType) pixel.green,
            (MagickRealType) pixel.opacity));
          q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue,
            (MagickRealType) q->opacity,(MagickRealType) pixel.blue,
            (MagickRealType) pixel.opacity));
          q->opacity=ClampToQuantum(opacity);
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              SetPixelIndex(indexes+x,index);
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      /* Early return: skips the final SyncImagePixelCache() below. */
      return(status);
    }
    case ResetAlphaChannel: /* deprecated */
    case OpaqueAlphaChannel:
    {
      status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case SetAlphaChannel:
    {
      /* Only initialize opacity if the channel was not already active. */
      if (image->matte == MagickFalse)
        status=SetImageOpacity(image,OpaqueOpacity);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageOpacity(image,TransparentOpacity);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  return(SyncImagePixelCache(image,&image->exception));
}
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(8*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(8*t3+Nx-5,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),128*t4+126);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
flush-1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-gimple" } */

/* Flush with no variable list: a full memory fence over all visible data. */
void f1(void)
{
  #pragma omp flush
}

int x, y, z;

/* Flush with explicit variable lists on both branches; GCC lowers every
   flush (listed or not) to a full __sync_synchronize in gimple, which is
   what the scan below counts: one for f1 plus two here = 3. */
void f2(_Bool p)
{
  if (p)
    {
      #pragma omp flush (x)
    }
  else
    {
      #pragma omp flush (x, y, z)
    }
}

/* { dg-final { scan-tree-dump-times "__sync_synchronize" 3 "gimple" } } */
/* { dg-final { cleanup-tree-dump "gimple" } } */
LUT.h
/* * LUT.h * This file is part of RawTherapee. * * Copyright (c) 2011 Jan Rinze Peterzon (janrinze@gmail.com) * * RawTherapee is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * RawTherapee is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with RawTherapee. If not, see <https://www.gnu.org/licenses/>. */ /* * Declaration of flexible Lookup Tables * * Usage: * * LUT<type> name (size); * LUT<type> name (size, flags); * * creates an array which is valid within the normal C/C++ scope "{ ... }" * * access to elements is a simple as: * * LUT<float> my_lut (10); * float value = my_lut[3]; * float value = my_lut[2.5]; // this will interpolate * * when using a float type index it will interpolate the lookup values * * extra setting in flags: (clipping is set by default) * LUT_CLIP_ABOVE * LUT_CLIP_BELOW * * example: * LUT<float> my_lut (10,LUT_CLIP_BELOW); * float value = my_lut[22.5]; // this will extrapolate * float value = my_lut[-22.5]; // this will not extrapolate * * LUT<float> my_lut (10,0); // this will extrapolate on either side * * shotcuts: * * LUTf stands for LUT<float> * LUTi stands for LUT<int> * LUTu stands for LUT<unsigned int> * LUTd stands for LUT<double> * LUTuc stands for LUT<unsigned char> */ #pragma once #include <algorithm> #include <cstring> #include <cstdint> #include <cassert> #include <vector> #ifndef NDEBUG #include <fstream> #endif #include "opthelper.h" #include "rt_math.h" // Bit representations of flags enum { LUT_CLIP_OFF, // LUT does not clip input values LUT_CLIP_BELOW, // LUT clips input values at lower 
bound LUT_CLIP_ABOVE // LUT clips input values at upper bound }; template<typename T> class LUT; using LUTf = LUT<float>; using LUTi = LUT<int32_t>; using LUTu = LUT<uint32_t>; using LUTd = LUT<double>; using LUTuc = LUT<uint8_t>; template<typename T> class LUT { protected: // list of variables ordered to improve cache speed int maxs; float maxsf; T * data; unsigned int clip; unsigned int size; unsigned int upperBound; // always equals size-1, parameter created for performance reason private: unsigned int owner; #ifdef __SSE2__ alignas(16) vfloat maxsv; alignas(16) vfloat sizev; alignas(16) vint sizeiv; #endif public: /// convenience flag! If one doesn't want to delete the buffer but want to flag it to be recomputed... /// The user have to handle it itself, even if some method can (re)initialize it bool dirty; explicit LUT(int s, int flags = LUT_CLIP_BELOW | LUT_CLIP_ABOVE, bool initZero = false) { #ifndef NDEBUG if (s <= 0) { printf("s<=0!\n"); } assert (s > 0); #endif dirty = true; clip = flags; // Add a few extra elements so [](vfloat) won't access out-of-bounds memory. // The routine would still produce the right answer, but might cause issues // with address/heap checking programs. data = new T[s + 3]; owner = 1; size = s; upperBound = size - 1; maxs = size - 2; maxsf = (float)maxs; #ifdef __SSE2__ maxsv = F2V( maxs ); sizeiv = _mm_set1_epi32( (int)(size - 1) ); sizev = F2V( size - 1 ); #endif if (initZero) { clear(); } } explicit LUT(const std::vector<T>& input, int flags = LUT_CLIP_BELOW | LUT_CLIP_ABOVE) : maxs(input.size() - 2), maxsf(maxs), data(new T[input.size() + 3]), // Add a few extra elements so [](vfloat) won't access out-of-bounds memory. 
clip(flags),
    size(input.size()),
    upperBound(size - 1),
    owner(1),
#ifdef __SSE2__
    maxsv(F2V(maxs)),
    sizev(F2V(size - 1)),
    sizeiv(_mm_set1_epi32(size - 1)),
#endif
    dirty(true)
{
    // NOTE(review): this is the tail of a constructor whose head (and the member
    // declarations, including 'data', 'maxs', 'maxsf') lies before this chunk.
#ifndef NDEBUG

    if (input.empty()) {
        printf("s=0!\n");
    }

    assert(!input.empty());
#endif
    // Copy the caller's values into our own buffer.
    std::copy_n(input.begin(), input.size(), data);
}

// (Re)allocate the LUT to hold s elements and reset clip flags.
// Previous contents are discarded; the new buffer is uninitialized unless
// initZero is true. s must be > 0.
void operator ()(int s, int flags = LUT_CLIP_BELOW | LUT_CLIP_ABOVE, bool initZero = false)
{
#ifndef NDEBUG

    if (s <= 0) {
        printf("s<=0!\n");
    }

    assert (s > 0);
#endif

    // Release any buffer we own before allocating a new one.
    if (owner && data) {
        delete[] data;
    }

    dirty = true; // Assumption!
    clip = flags;
    // See comment in constructor.
    data = new T[s + 3];
    owner = 1;
    size = s;
    upperBound = size - 1;
    maxs = size - 2;
    maxsf = (float)maxs;
#ifdef __SSE2__
    maxsv = F2V( maxs );
    sizeiv = _mm_set1_epi32( (int)(size - 1) );
    sizev = F2V( size - 1 );
#endif

    if (initZero) {
        clear();
    }
}

// Default constructor: empty LUT (size 0, null buffer). operator bool() is false.
LUT()
{
    data = nullptr;
    reset();
#ifdef __SSE2__
    maxsv = ZEROV;
    sizev = ZEROV;
    sizeiv = _mm_setzero_si128();
#endif
}

~LUT()
{
    if (owner) {
        delete[] data;
#ifndef NDEBUG
        // Poison the pointer in debug builds so use-after-free is easy to spot.
        data = (T*)0xBAADF00D;
#endif
    }
}

explicit LUT(const LUT&) = delete;

void setClip(int flags)
{
    clip = flags;
}

int getClip() const
{
    return clip;
}

/** @brief Get the number of element in the LUT (i.e. dimension of the array)
 * For a LUT(500), it will return 500
 * @return number of element in the array
 */
unsigned int getSize() const
{
    return size;
}

/** @brief Get the highest value possible (i.e. dimension of the array)
 * For a LUT(500), it will return 499, because 500 elements, starting from 0, goes up to 499
 * @return number of element in the array
 */
unsigned int getUpperBound() const
{
    return size > 0 ? upperBound : 0;
}

// Deep-copy assignment: reuses the existing buffer when it is large enough,
// otherwise reallocates. Always becomes the owner of its buffer afterwards.
LUT<T>& operator=(const LUT<T>& rhs)
{
    if (this != &rhs) {
        if (rhs.size > this->size) {
            delete [] this->data;
            this->data = nullptr;
        }

        if (this->data == nullptr) {
            // See comment in constructor.
            this->data = new T[rhs.size + 3];
        }

        this->clip = rhs.clip;
        this->owner = 1;
        memcpy(this->data, rhs.data, rhs.size * sizeof(T));
        this->size = rhs.size;
        this->upperBound = rhs.upperBound;
        this->maxs = this->size - 2;
        this->maxsf = (float)this->maxs;
#ifdef __SSE2__
        this->maxsv = F2V( this->size - 2);
        this->sizeiv = _mm_set1_epi32( (int)(this->size - 1) );
        this->sizev = F2V( this->size - 1 );
#endif
    }

    return *this;
}

// handy to sum up per thread histograms. #pragma omp simd speeds up the loop by about factor 3 for LUTu (uint32_t).
// NOTE(review): silently does nothing when the sizes differ.
LUT<T>& operator+=(const LUT<T>& rhs)
{
    if (rhs.size == this->size) {
#ifdef _OPENMP
        #pragma omp simd
#endif

        for(unsigned int i = 0; i < this->size; i++) {
            data[i] += rhs.data[i];
        }
    }

    return *this;
}

// multiply all elements of LUT<float> with a constant float value
template<typename U = T, typename = typename std::enable_if<std::is_same<U, float>::value>::type>
LUT<float>& operator*=(float factor)
{
#ifdef _OPENMP
    #pragma omp simd
#endif

    for(unsigned int i = 0; i < this->size; i++) {
        data[i] *= factor;
    }

    return *this;
}

// divide all elements of LUT<float> by a constant float value
template<typename U = T, typename = typename std::enable_if<std::is_same<U, float>::value>::type>
LUT<float>& operator/=(float divisor)
{
#ifdef _OPENMP
    #pragma omp simd
#endif

    for(unsigned int i = 0; i < this->size; i++) {
        data[i] /= divisor;
    }

    return *this;
}

// use with integer indices; index is clamped to [0, upperBound]
T& operator[](int index) const
{
    return data[ rtengine::LIM<int>(index, 0, upperBound) ];
}

#ifdef __SSE2__
// NOTE: This function requires LUTs which clips only at lower bound
// Four parallel linearly-interpolated lookups for a vector of float indices.
vfloat cb(vfloat indexv) const
{
    static_assert(std::is_same<T, float>::value, "This method only works for float LUTs");

    // Clamp and convert to integer values. Extract out of SSE register because all
    // lookup operations use regular addresses.
    vfloat clampedIndexes = vclampf(indexv, ZEROV, maxsv); // this automagically uses ZEROV in case indexv is NaN
    vint indexes = _mm_cvttps_epi32(clampedIndexes);
    int indexArray[4];
    _mm_storeu_si128(reinterpret_cast<__m128i*>(&indexArray[0]), indexes);

    // Load data from the table. This reads more than necessary, but there don't seem
    // to exist more granular operations (though we could try non-SSE).
    // Cast to int for convenience in the next operation (partial transpose).
    vint values[4];

    for (int i = 0; i < 4; ++i) {
        values[i] = _mm_castps_si128(LVFU(data[indexArray[i]]));
    }

    // Partial 4x4 transpose operation. We want two new vectors, the first consisting
    // of [values[0][0] ... values[3][0]] and the second [values[0][1] ... values[3][1]].
    __m128i temp0 = _mm_unpacklo_epi32(values[0], values[1]);
    __m128i temp1 = _mm_unpacklo_epi32(values[2], values[3]);
    vfloat lowerVal = _mm_castsi128_ps(_mm_unpacklo_epi64(temp0, temp1));
    vfloat upperVal = _mm_castsi128_ps(_mm_unpackhi_epi64(temp0, temp1));

    vfloat diff = vmaxf(ZEROV, indexv) - _mm_cvtepi32_ps(indexes);
    return vintpf(diff, upperVal, lowerVal);
}

// NOTE: This version requires LUTs which clip at upper and lower bounds
// (which is the default).
vfloat operator[](vfloat indexv) const
{
    static_assert(std::is_same<T, float>::value, "This method only works for float LUTs");

    // Clamp and convert to integer values. Extract out of SSE register because all
    // lookup operations use regular addresses.
    vfloat clampedIndexes = vclampf(indexv, ZEROV, maxsv); // this automagically uses ZEROV in case indexv is NaN
    vint indexes = _mm_cvttps_epi32(clampedIndexes);
    int indexArray[4];
    _mm_storeu_si128(reinterpret_cast<__m128i*>(&indexArray[0]), indexes);

    // Load data from the table. This reads more than necessary, but there don't seem
    // to exist more granular operations (though we could try non-SSE).
    // Cast to int for convenience in the next operation (partial transpose).
    vint values[4];

    for (int i = 0; i < 4; ++i) {
        values[i] = _mm_castps_si128(LVFU(data[indexArray[i]]));
    }

    // Partial 4x4 transpose operation. We want two new vectors, the first consisting
    // of [values[0][0] ... values[3][0]] and the second [values[0][1] ... values[3][1]].
    __m128i temp0 = _mm_unpacklo_epi32(values[0], values[1]);
    __m128i temp1 = _mm_unpacklo_epi32(values[2], values[3]);
    vfloat lowerVal = _mm_castsi128_ps(_mm_unpacklo_epi64(temp0, temp1));
    vfloat upperVal = _mm_castsi128_ps(_mm_unpackhi_epi64(temp0, temp1));

    vfloat diff = vclampf(indexv, ZEROV, sizev) - _mm_cvtepi32_ps(indexes); // this automagically uses ZEROV in case indexv is NaN
    return vintpf(diff, upperVal, lowerVal);
}

// NOTE: This version requires LUTs which do not clip at upper and lower bounds
vfloat operator()(vfloat indexv) const
{
    static_assert(std::is_same<T, float>::value, "This method only works for float LUTs");

    // Clamp and convert to integer values. Extract out of SSE register because all
    // lookup operations use regular addresses.
    vfloat clampedIndexes = vclampf(indexv, ZEROV, maxsv); // this automagically uses ZEROV in case indexv is NaN
    vint indexes = _mm_cvttps_epi32(clampedIndexes);
    int indexArray[4];
    _mm_storeu_si128(reinterpret_cast<__m128i*>(&indexArray[0]), indexes);

    // Load data from the table. This reads more than necessary, but there don't seem
    // to exist more granular operations (though we could try non-SSE).
    // Cast to int for convenience in the next operation (partial transpose).
    vint values[4];

    for (int i = 0; i < 4; ++i) {
        values[i] = _mm_castps_si128(LVFU(data[indexArray[i]]));
    }

    // Partial 4x4 transpose operation. We want two new vectors, the first consisting
    // of [values[0][0] ... values[3][0]] and the second [values[0][1] ... values[3][1]].
    __m128i temp0 = _mm_unpacklo_epi32(values[0], values[1]);
    __m128i temp1 = _mm_unpacklo_epi32(values[2], values[3]);
    vfloat lowerVal = _mm_castsi128_ps(_mm_unpacklo_epi64(temp0, temp1));
    vfloat upperVal = _mm_castsi128_ps(_mm_unpackhi_epi64(temp0, temp1));

    vfloat diff = indexv - _mm_cvtepi32_ps(indexes);
    return vintpf(diff, upperVal, lowerVal);
}

// vectorized LUT access with integer indices. Clips at lower and upper bounds
#ifdef __SSE4_1__
template<typename U = T, typename = typename std::enable_if<std::is_same<U, float>::value>::type>
vfloat operator[](vint idxv) const
{
    // SSE4.1 has 32-bit integer min/max, so clamp directly in the integer domain.
    idxv = _mm_max_epi32( _mm_setzero_si128(), _mm_min_epi32(idxv, sizeiv));
    // access the LUT 4 times. Trust the compiler. It generates good code here, better than hand written SSE code
    return _mm_setr_ps(data[_mm_extract_epi32(idxv,0)], data[_mm_extract_epi32(idxv,1)], data[_mm_extract_epi32(idxv,2)], data[_mm_extract_epi32(idxv,3)]);
}
#else
template<typename U = T, typename = typename std::enable_if<std::is_same<U, float>::value>::type>
vfloat operator[](vint idxv) const
{
    // convert to float because SSE2 has no min/max for 32bit integers
    vfloat tempv = vclampf(_mm_cvtepi32_ps(idxv), ZEROV, sizev); // this automagically uses ZEROV in case idxv is NaN (which will never happen because it is a vector of int)
    idxv = _mm_cvttps_epi32(tempv);
    // access the LUT 4 times. Trust the compiler. It generates good code here, better than hand written SSE code
    return _mm_setr_ps(data[_mm_cvtsi128_si32(idxv)], data[_mm_cvtsi128_si32(_mm_shuffle_epi32(idxv, _MM_SHUFFLE(1, 1, 1, 1)))], data[_mm_cvtsi128_si32(_mm_shuffle_epi32(idxv, _MM_SHUFFLE(2, 2, 2, 2)))], data[_mm_cvtsi128_si32(_mm_shuffle_epi32(idxv, _MM_SHUFFLE(3, 3, 3, 3)))]);
}
#endif
#endif

// use with float indices: linear interpolation between the two neighbouring
// entries, honoring the LUT_CLIP_BELOW / LUT_CLIP_ABOVE flags out of range.
template<typename U = T, typename V, typename = typename std::enable_if<std::is_floating_point<V>::value && std::is_same<U, float>::value>::type>
T operator[](V index) const
{
    int idx = (int)index;  // don't use floor! The difference in negative space is no problems here

    if (index < 0.f) {
        if (clip & LUT_CLIP_BELOW) {
            return data[0];
        }

        idx = 0;
    } else if (index > maxsf) {
        if (clip & LUT_CLIP_ABOVE) {
            return data[upperBound];
        }

        idx = maxs;
    }

    float diff = index - (float) idx;
    T p1 = data[idx];
    T p2 = data[idx + 1] - p1;
    return (p1 + p2 * diff);
}

// Return the value for "index" that is in the [0-1] range.
template<typename U = T, typename = typename std::enable_if<std::is_same<U, float>::value>::type>
T getVal01(float index) const
{
    // Scale the normalized index to the table range, then interpolate as above.
    index *= (float)upperBound;
    int idx = (int)index;  // don't use floor! The difference in negative space is no problems here

    if (index < 0.f) {
        if (clip & LUT_CLIP_BELOW) {
            return data[0];
        }

        idx = 0;
    } else if (index > maxsf) {
        if (clip & LUT_CLIP_ABOVE) {
            return data[upperBound];
        }

        idx = maxs;
    }

    float diff = index - (float) idx;
    T p1 = data[idx];
    T p2 = data[idx + 1] - p1;
    return (p1 + p2 * diff);
}

// True when the LUT has been allocated (size > 0).
operator bool() const // FIXME: Should be explicit
{
    return size > 0;
}

// Zero the first 'size' elements of the buffer.
void clear()
{
    if (data && size) {
        memset(data, 0, size * sizeof(T));
    }
}

// Release the buffer (if any) and return to the default-constructed state.
void reset()
{
    if (data) {
        delete[] data;
    }

    dirty = true;
    data = nullptr;
    owner = 1;
    size = 0;
    upperBound = 0;
    maxs = 0;
    maxsf = 0.f;
    clip = 0;
}

// create an identity LUT (LUT(x) = x) or a scaled identity LUT (LUT(x) = x / divisor)
template<typename U = T, typename = typename std::enable_if<std::is_same<U, float>::value>::type>
void makeIdentity(float divisor = 1.f)
{
    if(divisor == 1.f) {
        for(unsigned int i = 0; i < size; i++) {
            data[i] = i;
        }
    } else {
        for(unsigned int i = 0; i < size; i++) {
            data[i] = i / divisor;
        }
    }
}

// compress a LUT<uint32_t> with size y into a LUT<uint32_t> with size x (y>x)
// NOTE(review): numVals == 1 would make divisor 0 -> division by zero; callers
// apparently guarantee numVals > 1 — confirm.
template<typename U = T, typename = typename std::enable_if<std::is_same<U, std::uint32_t>::value>::type>
void compressTo(LUT<T> &dest, unsigned int numVals = 0) const
{
    numVals = numVals == 0 ? size : numVals;
    numVals = std::min(numVals, size);
    float divisor = numVals - 1;
    float mult = (dest.size - 1) / divisor;

    for (unsigned int i = 0; i < numVals; i++) {
        int hi = (int)(mult * i);
        dest.data[hi] += this->data[i] ;
    }
}

// compress a LUT<uint32_t> with size y into a LUT<uint32_t> with size x (y>x) by using the passThrough LUT to calculate indexes
template<typename U = T, typename = typename std::enable_if<std::is_same<U, std::uint32_t>::value>::type>
void compressTo(LUT<T> &dest, unsigned int numVals, const LUT<float> &passThrough) const
{
    if(passThrough) {
        numVals = std::min(numVals, size);
        numVals = std::min(numVals, passThrough.getSize());
        float mult = dest.size - 1;

        for (unsigned int i = 0; i < numVals; i++) {
            int hi = (int)(mult * passThrough[i]);
            dest[hi] += this->data[i] ;
        }
    }
}

// compute sum and average of a LUT<uint32_t>
// sum = total of all entries; avg = weighted mean index (histogram centroid).
template<typename U = T, typename = typename std::enable_if<std::is_same<U, std::uint32_t>::value>::type>
void getSumAndAverage(float &sum, float &avg) const
{
    sum = 0.f;
    avg = 0.f;
    int i = 0;
#ifdef __SSE2__
    // Vectorized main loop; the scalar loop below handles the remaining
    // (size % 4) elements.
    vfloat iv = _mm_set_ps(3.f, 2.f, 1.f, 0.f);
    vfloat fourv = F2V(4.f);
    vint sumv = (vint)ZEROV;
    vfloat avgv = ZEROV;

    for(; i < static_cast<int>(size) - 3; i += 4) {
        vint datav = _mm_loadu_si128((__m128i*)&data[i]);
        sumv += datav;
        avgv += iv * _mm_cvtepi32_ps(datav);
        iv += fourv;
    }

    sum = vhadd(_mm_cvtepi32_ps(sumv));
    avg = vhadd(avgv);
#endif

    for (; i < static_cast<int>(size); i++) {
        T val = data[i];
        sum += val;
        avg += i * val;
    }

    avg /= sum;
}

// Fill the first numVals entries (default: all) with a constant value.
template<typename U = T, typename = typename std::enable_if<std::is_same<U, float>::value>::type>
void makeConstant(float value, unsigned int numVals = 0)
{
    numVals = numVals == 0 ? size : numVals;
    numVals = std::min(numVals, size);

    for(unsigned int i = 0; i < numVals; i++) {
        data[i] = value;
    }
}

// share the buffer with another LUT, handy for same data but different clip flags
// NOTE(review): this LUT does NOT take ownership; 'source' must outlive it.
void share(const LUT<T> &source, int flags = LUT_CLIP_BELOW | LUT_CLIP_ABOVE)
{
    if (owner && data) {
        delete[] data;
    }

    dirty = false;  // Assumption
    clip = flags;
    data = source.data;
    owner = 0;
    size = source.getSize();
    upperBound = size - 1;
    maxs = size - 2;
    maxsf = (float)maxs;
#ifdef __SSE2__
    maxsv = F2V( size - 2);
    sizeiv = _mm_set1_epi32( (int)(size - 1) );
    sizev = F2V( size - 1 );
#endif
}

};
convolution_winograd_transform_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6,3) input transform for pack-4 bf16 storage: converts each 8x8
// input tile (bf16, 4 channels interleaved) to the 8x8 transformed domain
// (float32). The transform B^T * d * B is applied separably: first per row
// into 'tmp', then per column into bottom_blob_tm.
static void conv3x3s1_winograd63_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Per-tile scratch: [transform row][input row][4 channels].
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4;

                // Pass 1: transform each of the 8 input rows (bf16 -> f32 on load).
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));
                    float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24));
                    float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28));

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);

                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    r0 += w * 4;
                }

                // Pass 2: transform the columns of 'tmp' and scatter the 8x8
                // transformed tile into the 8*8 frequency planes of img0_tm.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);

                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}

// Winograd F(6,3) output transform for pack-4 bf16 storage: converts each 8x8
// transformed tile (float32) back to a 6x6 spatial output tile, adds the
// per-channel bias, and stores the result as bf16.
static void conv3x3s1_winograd63_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f,  1.0f,   1.0f,   1.0f,   1.0f,  32.0f, 32.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   2.0f,  -2.0f,  16.0f,-16.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,   4.0f,   4.0f,   8.0f,  8.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   8.0f,  -8.0f,   4.0f, -4.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,  16.0f,  16.0f,   2.0f,  2.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,  32.0f, -32.0f,   1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // Per-tile scratch: [output row][transform row][4 channels].
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4;

                // Pass 1: column transform (8 rows -> 6 rows) into 'tmp'.
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // Pass 2: row transform (8 cols -> 6 cols), add bias, store bf16.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));
                    vst1_u16(output0 + 16, vcvt_bf16_f32(_out04));
                    vst1_u16(output0 + 20, vcvt_bf16_f32(_out05));

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Winograd F(4,3) input transform for pack-4 bf16 storage: converts each 6x6
// input tile (bf16) to the 6x6 transformed domain (float32), applied
// separably as in the F(6,3) variant above.
static void conv3x3s1_winograd43_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 4) + (j * 4) * 4;

                // Pass 1: row transform (bf16 -> f32 on load).
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;
                }

                // Pass 2: column transform into the 6*6 frequency planes.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}

// Winograd F(4,3) output transform for pack-4 bf16 storage: converts each 6x6
// transformed tile back to a 4x4 spatial output tile, adds the per-channel
// bias, and stores the result as bf16.
static void conv3x3s1_winograd43_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                unsigned short* output0 = out0.row<unsigned short>(i * 4) + (j * 4) * 4;

                // Pass 1: column transform (6 rows -> 4 rows) into 'tmp'.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // Pass 2: row transform (6 cols -> 4 cols), add bias, store bf16.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));

                    output0 += outw * 4;
                }
            }
        }
    }
}
rowwise_pick.h
/*! * Copyright (c) 2020 by Contributors * \file array/cpu/rowwise_pick.h * \brief Template implementation for rowwise pick operators. */ #ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_ #define DGL_ARRAY_CPU_ROWWISE_PICK_H_ #include <dgl/array.h> #include <functional> namespace dgl { namespace aten { namespace impl { // User-defined function for picking elements from one row. // // The column indices of the given row are stored in // [col + off, col + off + len) // // Similarly, the data indices are stored in // [data + off, data + off + len) // Data index pointer could be NULL, which means data[i] == i // // *ATTENTION*: This function will be invoked concurrently. Please make sure // it is thread-safe. // // \param rowid The row to pick from. // \param off Starting offset of this row. // \param len NNZ of the row. // \param col Pointer of the column indices. // \param data Pointer of the data indices. // \param out_idx Picked indices in [off, off + len). template <typename IdxType> using PickFn = std::function<void( IdxType rowid, IdxType off, IdxType len, const IdxType* col, const IdxType* data, IdxType* out_idx)>; // Template for picking non-zero values row-wise. The implementation utilizes // OpenMP parallelization on rows because each row performs computation independently. template <typename IdxType> COOMatrix CSRRowWisePick(CSRMatrix mat, IdArray rows, int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) { using namespace aten; const IdxType* indptr = static_cast<IdxType*>(mat.indptr->data); const IdxType* indices = static_cast<IdxType*>(mat.indices->data); const IdxType* data = CSRHasData(mat)? static_cast<IdxType*>(mat.data->data) : nullptr; const IdxType* rows_data = static_cast<IdxType*>(rows->data); const int64_t num_rows = rows->shape[0]; const auto& ctx = mat.indptr->ctx; // To leverage OMP parallelization, we create two arrays to store // picked src and dst indices. Each array is of length num_rows * num_picks. 
// For rows whose nnz < num_picks, the indices are padded with -1. // // We check whether all the given rows // have at least num_picks number of nnz when replace is false. // // If the check holds, remove -1 elements by remove_if operation, which simply // moves valid elements to the head of arrays and create a view of the original // array. The implementation consumes a little extra memory than the actual requirement. // // Otherwise, directly use the row and col arrays to construct the result COO matrix. // // [02/29/2020 update]: OMP is disabled for now since batch-wise parallelism is more // significant. (minjie) IdArray picked_row = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx); IdArray picked_col = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx); IdArray picked_idx = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx); IdxType* picked_rdata = static_cast<IdxType*>(picked_row->data); IdxType* picked_cdata = static_cast<IdxType*>(picked_col->data); IdxType* picked_idata = static_cast<IdxType*>(picked_idx->data); bool all_has_fanout = true; #pragma omp parallel for reduction(&&:all_has_fanout) for (int64_t i = 0; i < num_rows; ++i) { const IdxType rid = rows_data[i]; const IdxType len = indptr[rid + 1] - indptr[rid]; // If a node has no neighbor then all_has_fanout must be false even if replace is // true. all_has_fanout = all_has_fanout && (len >= (replace ? 1 : num_picks)); } #pragma omp parallel for for (int64_t i = 0; i < num_rows; ++i) { const IdxType rid = rows_data[i]; CHECK_LT(rid, mat.num_rows); const IdxType off = indptr[rid]; const IdxType len = indptr[rid + 1] - off; if (len == 0) continue; if (len <= num_picks && !replace) { // nnz <= num_picks and w/o replacement, take all nnz for (int64_t j = 0; j < len; ++j) { picked_rdata[i * num_picks + j] = rid; picked_cdata[i * num_picks + j] = indices[off + j]; picked_idata[i * num_picks + j] = data? 
data[off + j] : off + j; } } else { pick_fn(rid, off, len, indices, data, picked_idata + i * num_picks); for (int64_t j = 0; j < num_picks; ++j) { const IdxType picked = picked_idata[i * num_picks + j]; picked_rdata[i * num_picks + j] = rid; picked_cdata[i * num_picks + j] = indices[picked]; picked_idata[i * num_picks + j] = data? data[picked] : picked; } } } if (!all_has_fanout) { // correct the array by remove_if IdxType* new_row_end = std::remove_if(picked_rdata, picked_rdata + num_rows * num_picks, [] (IdxType i) { return i == -1; }); IdxType* new_col_end = std::remove_if(picked_cdata, picked_cdata + num_rows * num_picks, [] (IdxType i) { return i == -1; }); IdxType* new_idx_end = std::remove_if(picked_idata, picked_idata + num_rows * num_picks, [] (IdxType i) { return i == -1; }); const int64_t new_len = (new_row_end - picked_rdata); CHECK_EQ(new_col_end - picked_cdata, new_len); CHECK_EQ(new_idx_end - picked_idata, new_len); picked_row = picked_row.CreateView({new_len}, picked_row->dtype); picked_col = picked_col.CreateView({new_len}, picked_col->dtype); picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype); } return COOMatrix(mat.num_rows, mat.num_cols, picked_row, picked_col, picked_idx); } // Template for picking non-zero values row-wise. The implementation first slices // out the corresponding rows and then converts it to CSR format. It then performs // row-wise pick on the CSR matrix and rectifies the returned results. 
template <typename IdxType>
COOMatrix COORowWisePick(COOMatrix mat, IdArray rows,
                         int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
  using namespace aten;
  // Restrict the COO matrix to the requested rows, then convert the slice to
  // CSR so CSRRowWisePick can be reused.
  const auto& csr = COOToCSR(COOSliceRows(mat, rows));
  // After slicing, row i of `csr` corresponds to rows[i]; pick from every row
  // of the slice.
  const IdArray new_rows = Range(0, rows->shape[0], rows->dtype.bits, rows->ctx);
  const auto& picked = CSRRowWisePick<IdxType>(csr, new_rows, num_picks, replace, pick_fn);
  // The picked row ids are positions into `rows`; translate them back to the
  // row ids of the original matrix. Column/data ids are assumed to be
  // preserved by COOSliceRows — TODO(review): confirm against COOSliceRows.
  return COOMatrix(mat.num_rows, mat.num_cols,
                   IndexSelect(rows, picked.row),  // map the row index to the correct one
                   picked.col,
                   picked.data);
}

}  // namespace impl
}  // namespace aten
}  // namespace dgl

#endif  // DGL_ARRAY_CPU_ROWWISE_PICK_H_
pmtv.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <time.h>

// #define TESTING
#define FINAL

/*
 * Wall-clock timer in seconds. The original code called omp_get_wtime()
 * unconditionally even though <omp.h> is only included under _OPENMP, so a
 * build without OpenMP failed; fall back to clock() in that case.
 */
static double wall_seconds(void)
{
#ifdef _OPENMP
    return omp_get_wtime();
#else
    return (double) clock() / (double) CLOCKS_PER_SEC;
#endif
}

/*
 * Multiplies an upper-triangular N x N matrix (all stored entries 3) by a
 * vector (all entries 5) in parallel and reports the elapsed time.
 *
 * Usage: prog <N>.  With schedule(runtime) the user can choose the loop
 * schedule through the OMP_SCHEDULE environment variable.
 */
int main(int argc, char const *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "Falta el tamanio de la matriz\n");
        exit(-1);
    }

    int N = atoi(argv[1]);
    if (N <= 0) {
        /* atoi() returns 0 on garbage; reject non-positive sizes */
        fprintf(stderr, "Falta el tamanio de la matriz\n");
        exit(-1);
    }

    int **matriz;
    int *vector;
    int *sol;
    int i, j;

    /* INICIALIZACION: asumimos que la matriz es triangular superior */
    matriz = (int **) malloc(N * sizeof(int *));
    vector = (int *) malloc(N * sizeof(int));
    sol = (int *) malloc(N * sizeof(int));
    if (matriz == NULL || vector == NULL || sol == NULL) {
        fprintf(stderr, "malloc\n");
        exit(-1);
    }
    for (i = 0; i < N; i++) {
        matriz[i] = (int *) malloc(N * sizeof(int));
        if (matriz[i] == NULL) {
            fprintf(stderr, "malloc\n");
            exit(-1);
        }
    }

    #pragma omp parallel for private(j) schedule(runtime)
    for (i = 0; i < N; i++) {
        for (j = i; j < N; j++)          /* only the upper triangle is used */
            matriz[i][j] = 3;
        vector[i] = 5;
        sol[i] = 0;
    }

#ifdef TESTING
    printf("Matriz:\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            if (j >= i)
                printf("%d ", matriz[i][j]);
            else
                printf("0 ");
        }
        printf("\n");
    }
    printf("Vector:\n");
    for (i = 0; i < N; i++)
        printf("%d ", vector[i]);
    printf("\n");
#endif

    double start, end, tiempo;
    start = wall_seconds();

    // OBTENCION DEL VECTOR SOLUCION
    // con schedule(runtime) el usuario podra escoger la
    // planificacion que quiera con OMP_SCHEDULE
    #pragma omp parallel for private(j) schedule(runtime)
    for (i = 0; i < N; i++)
        for (j = i; j < N; j++)
            sol[i] += matriz[i][j] * vector[j];
    /* NOTE: the original re-assigned sol[N-1] = matriz[N-1][N-1]*vector[N-1]
     * after the loop; the loop already computes exactly that value (sol was
     * zero-initialised), so the redundant statement was removed. */

    end = wall_seconds();
    tiempo = end - start;

#ifdef TESTING
    printf("Resultado:\n");
    for (i = 0; i < N; i++)
        printf("%d ", sol[i]);
    printf("\n");
#endif

#ifdef FINAL
    printf("Tiempo = %11.9f\t Primera = %d\t Ultima=%d\n", tiempo, sol[0], sol[N-1]);
#endif

    /* Release all allocations (the original leaked everything). */
    for (i = 0; i < N; i++)
        free(matriz[i]);
    free(matriz);
    free(vector);
    free(sol);

    return 0;
}
GB_binop__bshift_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint32) // C=scalar+B GB (_bind1st__bshift_uint32) // C=scalar+B' GB (_bind1st_tran__bshift_uint32) // C=A+scalar GB (_bind2nd__bshift_uint32) // C=A'+scalar GB (_bind2nd_tran__bshift_uint32) // C type: uint32_t // A type: uint32_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_uint32 (aij, bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true 
if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_bitshift_uint32 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_UINT32 || GxB_NO_BSHIFT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bshift_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const 
GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bshift_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bshift_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_bitshift_uint32 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bshift_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_bitshift_uint32 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint32 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__bshift_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint32 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
common.c
/****************************************************************************
 *                                                                          *
 *             OpenMP MicroBenchmark Suite - Version 3.1                    *
 *                                                                          *
 *                            produced by                                   *
 *                                                                          *
 *                Mark Bull, Fiona Reid and Nix Mc Donnell                  *
 *                                                                          *
 *                                at                                        *
 *                                                                          *
 *                Edinburgh Parallel Computing Centre                       *
 *                                                                          *
 *         email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk                *
 *                                                                          *
 *                                                                          *
 *      This version copyright (c) The University of Edinburgh, 2015.       *
 *                                                                          *
 *                                                                          *
 *  Licensed under the Apache License, Version 2.0 (the "License");         *
 *  you may not use this file except in compliance with the License.        *
 *  You may obtain a copy of the License at                                 *
 *                                                                          *
 *      http://www.apache.org/licenses/LICENSE-2.0                          *
 *                                                                          *
 *  Unless required by applicable law or agreed to in writing, software     *
 *  distributed under the License is distributed on an "AS IS" BASIS,       *
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.*
 *  See the License for the specific language governing permissions and     *
 *  limitations under the License.                                          *
 *                                                                          *
 ****************************************************************************/

#include "common.h"

#define CONF95 1.96

/* Configuration, set from the command line or defaults in ompbench_init(). */
int nthreads = -1;           // Number of OpenMP threads
int delaylength = -1;        // The number of iterations to delay for
int outerreps = -1;          // Outer repetitions
double delaytime = -1.0;     // Length of time to delay for in microseconds
double targettesttime = 0.0; // The length of time in microseconds that the
                             // test should run for.

/* Measurement state. */
unsigned long innerreps; // Inner repetitions
double *times;           // Benchmark times in microseconds, entries
                         // 0..outerreps; entry 0 is a warm-up (stats() skips it)
double referencetime;    // The average reference time in microseconds to
                         // perform outerreps runs
double referencesd;      // The standard deviation in the reference time in
                         // microseconds for outerreps runs.
double testtime;         // The average test time in microseconds for
                         // outerreps runs
double testsd;           // The standard deviation in the test time in
                         // microseconds for outerreps runs.

/* Print command-line help. */
void usage(char *argv[]) {
    printf("Usage: %s.x \n"
           "\t--outer-repetitions <outer-repetitions> (default %d)\n"
           "\t--test-time <target-test-time> (default %0.2f microseconds)\n"
           "\t--delay-time <delay-time> (default %0.4f microseconds)\n"
           "\t--delay-length <delay-length> "
           "(default auto-generated based on processor speed)\n",
           argv[0],
           DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME);
}

/*
 * Parse the command-line parameters into the globals above.
 * Exits with a usage message on any invalid or missing value.
 */
void parse_args(int argc, char *argv[]) {
    int arg;
    for (arg = 1; arg < argc; arg++) {
        /* Guard against a flag given as the last argument: the original
         * read argv[++arg] past argc (undefined behavior). */
        if (strcmp(argv[arg], "--delay-time") == 0) {
            /* (was compared against 0.0 — strcmp returns an int) */
            if (arg + 1 >= argc) {
                usage(argv);
                exit(EXIT_FAILURE);
            }
            delaytime = atof(argv[++arg]);
            if (delaytime == 0.0) {
                printf("Invalid float:--delay-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
            if (arg + 1 >= argc) {
                usage(argv);
                exit(EXIT_FAILURE);
            }
            outerreps = atoi(argv[++arg]);
            if (outerreps == 0) {
                printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--test-time") == 0) {
            if (arg + 1 >= argc) {
                usage(argv);
                exit(EXIT_FAILURE);
            }
            targettesttime = atof(argv[++arg]);
            if (targettesttime == 0) {
                printf("Invalid integer:--test-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}

/*
 * Calibrate the number of delay() iterations needed to busy-wait for
 * (approximately) delaytime microseconds. Grows delaylength geometrically
 * until the measured time is long enough.
 */
int getdelaylengthfromtime(double delaytime) {
    int i, reps;
    double lapsedtime, starttime;  // seconds

    reps = 1000;
    lapsedtime = 0.0;

    delaytime = delaytime / 1.0E6;  // convert from microseconds to seconds
    // Note: delaytime is local to this function and thus the conversion
    // does not propagate to the main code.

    // Here we want to use the delaytime in microseconds to find the
    // delaylength in iterations. We start with delaylength=0 and
    // increase until we get a large enough delaytime; return delaylength
    // in iterations.
    delaylength = 0;
    delay(delaylength);

    while (lapsedtime < delaytime) {
        delaylength = delaylength * 1.1 + 1;
        starttime = getclock();
        for (i = 0; i < reps; i++) {
            delay(delaylength);
        }
        lapsedtime = (getclock() - starttime) / (double) reps;
    }
    return delaylength;
}

/*
 * Double innerreps until one invocation of test() takes at least
 * targettesttime microseconds; aborts if the loop appears to have been
 * optimised away.
 */
unsigned long getinnerreps(void (*test)(void)) {
    innerreps = 10L;  // some initial value
    double time = 0.0;

    while (time < targettesttime) {
        double start = getclock();
        test();
        time = (getclock() - start) * 1.0e6;
        innerreps *= 2;

        // Test to stop code if compiler is optimising reference time
        // expressions away
        if (innerreps > (targettesttime * 1.0e15)) {
            printf("Compiler has optimised reference loop away, STOP! \n");
            printf("Try recompiling with lower optimisation level \n");
            exit(1);
        }
    }
    return innerreps;
}

/* Print the banner preceding a measurement. */
void printheader(char *name) {
    printf("\n");
    printf("--------------------------------------------------------\n");
    printf("Computing %s time using %lu reps\n", name, innerreps);
}

/*
 * Compute mean and standard deviation of times[1..outerreps] (entry 0 is
 * the warm-up run and is deliberately excluded), and count outliers more
 * than 3 standard deviations from the mean.
 */
void stats(double *mtp, double *sdp) {
    double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;
    int i, nr;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;

    for (i = 1; i <= outerreps; i++) {
        mintime = (mintime < times[i]) ? mintime : times[i];
        maxtime = (maxtime > times[i]) ? maxtime : times[i];
        totaltime += times[i];
    }

    meantime = totaltime / outerreps;
    sumsq = 0;

    for (i = 1; i <= outerreps; i++) {
        sumsq += (times[i] - meantime) * (times[i] - meantime);
    }
    sd = sqrt(sumsq / (outerreps - 1));

    cutoff = 3.0 * sd;
    nr = 0;

    for (i = 1; i <= outerreps; i++) {
        if (fabs(times[i] - meantime) > cutoff)
            nr++;
    }

    printf("\n");
    printf("Sample_size Average Min Max S.D. Outliers\n");
    printf(" %d %f %f %f %f %d\n",
           outerreps, meantime, mintime, maxtime, sd, nr);
    printf("\n");

    *mtp = meantime;
    *sdp = sd;
}

/*
 * Print the measured time and the overhead relative to the reference time.
 * FIX: use the refsd parameter; the original ignored it and read the global
 * referencesd (identical at current call sites, but wrong in general).
 */
void printfooter(char *name, double testtime, double testsd,
                 double referencetime, double refsd) {
    printf("%s time = %f microseconds +/- %f\n",
           name, testtime, CONF95 * testsd);
    printf("%s overhead = %f microseconds +/- %f\n",
           name, testtime - referencetime, CONF95 * (testsd + refsd));
}

/* Print the reference time on its own. */
void printreferencefooter(char *name, double referencetime,
                          double referencesd) {
    printf("%s time = %f microseconds +/- %f\n",
           name, referencetime, CONF95 * referencesd);
}

/* Initialise the benchmark harness: thread count, options, delay
 * calibration and the times array. */
void ompbench_init(int argc, char **argv) {
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    parse_args(argc, argv);

    if (outerreps == -1) {
        outerreps = DEFAULT_OUTER_REPS;
    }
    if (targettesttime == 0.0) {
        targettesttime = DEFAULT_TEST_TARGET_TIME;
    }
    if (delaytime == -1.0) {
        delaytime = DEFAULT_DELAY_TIME;
    }
    // Always need to compute delaylength in iterations
    delaylength = getdelaylengthfromtime(delaytime);

    times = malloc((outerreps + 1) * sizeof(double));
    if (times == NULL) {  /* was unchecked */
        printf("Out of memory allocating times array\n");
        exit(EXIT_FAILURE);
    }

    /* NOTE(review): banner says 3.0 while the file header says 3.1 — string
     * kept as-is to preserve output; confirm intended version. */
    printf("Running OpenMP benchmark version 3.0\n"
           "\t%d thread(s)\n"
           "\t%d outer repetitions\n"
           "\t%0.2f test time (microseconds)\n"
           "\t%d delay length (iterations) \n"
           "\t%f delay time (microseconds)\n",
           nthreads,
           outerreps, targettesttime,
           delaylength, delaytime);
}

/* Release harness resources. */
void finalise(void) {
    free(times);
}

void initreference(char *name) {
    printheader(name);
}

/*
 * Calculate the reference time: run refer() outerreps+1 times and record
 * the per-innerrep time of each run.
 */
void reference(char *name, void (*refer)(void)) {
    int k;
    double start;

    // Calculate the required number of innerreps
    innerreps = getinnerreps(refer);

    initreference(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        refer();
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisereference(name);
}

void finalisereference(char *name) {
    stats(&referencetime, &referencesd);
    printreferencefooter(name, referencetime, referencesd);
}

/* NOTE(review): name is a historical typo for "inittest"; kept because it is
 * declared in common.h and may be called externally. */
void intitest(char *name) {
    printheader(name);
}

void finalisetest(char *name) {
    stats(&testtime, &testsd);
    printfooter(name, testtime, testsd, referencetime, referencesd);
}

/* Function to run a microbenchmark test. */
void benchmark(char *name, void (*test)(void)) {
    int k;
    double start;

    // Calculate the required number of innerreps
    innerreps = getinnerreps(test);

    intitest(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        test();
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisetest(name);
}

// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be affected.
#pragma _CRI noopt
void delay(int delaylength) {
    int i;
    float a = 0.;

    for (i = 0; i < delaylength; i++)
        a += i;
    if (a < 0)
        printf("%f \n", a);
}

void array_delay(int delaylength, double a[1]) {
    int i;
    a[0] = 1.0;
    for (i = 0; i < delaylength; i++)
        a[0] += i;
    if (a[0] < 0)
        printf("%f \n", a[0]);
}
// Re-enable optimisation for remainder of source.
#pragma _CRI opt

/* Wall-clock seconds from an arbitrary but consistent origin. */
double getclock() {
    double time;
    // (local prototype kept from the original; omp.h normally provides it)
    double omp_get_wtime(void);
    time = omp_get_wtime();
    return time;
}

int returnfalse() {
    return 0;
}
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
mk_comp_key_val.c
#include "q_incs.h" #include "mk_comp_key_val.h" /* As an example, if we have 3 raw attributes with the * first one having 2 derived attributes, * second one having 4 derived attributes, * third one having 3 derived attributes, * Then, * nC = 3 * nD = 2 + 3 + 4 * nR = (2+1) * (3+1) * (4+1) * Recall +1 is for the don't care case. * It is responsiblity of caller to make sure that (nV * nR) <= nK * */ int mk_comp_key_val( int **template, /* [nR][nC] */ int nR, int nC, /* 0 <= template[i][j] < nD */ uint8_t **in_dim_vals, /* [nD][nV] */ __VALTYPE__ *in_measure_val, /* [nV] */ uint64_t *out_key, /* [nK] */ __VALTYPE__ *out_val, /* [nK] */ int nV, int nK ) { int status = 0; #define BITS_FOR_VAL 8 // HARD CODED #define BITS_FOR_KEY 8 // HARD CODED int incr_shift_by = BITS_FOR_VAL + BITS_FOR_KEY; if ( ( nV*nR ) > nK ) { go_BYE(-1); } for ( int i = (nV*nR); i < nK; i++ ) { out_key[i] = 0; out_val[i] = 0; } int chunk_size = 64; #pragma omp parallel for schedule(static, chunk_size) for ( int i = 0; i < nV; i++ ) { int offset = i*nR; // every input produces nR outputs __VALTYPE__ val = in_measure_val[i]; for ( int ridx = 0; ridx < nR; ridx++ ) { uint64_t comp_key = 0; int shift_by = 0; for ( int cidx = 0; cidx < nC; cidx++ ) { uint32_t t_ridx_cidx = template[ridx][cidx]; if ( t_ridx_cidx == 0 ) { continue; } // Note the -1 because of Lua indexing versus C uint32_t key = ( t_ridx_cidx << BITS_FOR_VAL ) | in_dim_vals[t_ridx_cidx-1][i]; comp_key = comp_key | ( key << shift_by);; out_key[offset + ridx] = comp_key; out_val[offset + ridx] = val; shift_by += incr_shift_by; } } } BYE: return status; }
7z_fmt_plug.c
/* * 7-Zip cracker patch for JtR. Hacked together during June of 2013 by Dhiru * Kholia <dhiru at openwall.com>. Unicode support and other fixes by magnum. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * and Copyright (c) 2013-2017 magnum, and it is hereby released to the general * public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ /* * We've seen one single sample where we could not trust the padding check * (early rejection). To be able to crack such hashes, define this to 0. * This hits performance in some cases. */ #define TRUST_PADDING 0 #if FMT_EXTERNS_H extern struct fmt_main fmt_sevenzip; #elif FMT_REGISTERS_H john_register_one(&fmt_sevenzip); #else #include <string.h> #include <errno.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #if !ARCH_LITTLE_ENDIAN #undef SIMD_COEF_32 #undef SIMD_PARA_SHA256 #endif #include "johnswap.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "aes.h" #include "sha2.h" #include "crc32.h" #include "unicode.h" #include "dyna_salt.h" #include "lzma/LzmaDec.h" #include "lzma/Lzma2Dec.h" #define FORMAT_LABEL "7z" #define FORMAT_NAME "7-Zip" #define FORMAT_TAG "$7z$" #define TAG_LENGTH (sizeof(FORMAT_TAG)-1) #define BENCHMARK_COMMENT " (512K iterations)" #define BENCHMARK_LENGTH 0 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt*) #define SALT_ALIGN sizeof(struct custom_salt*) #ifndef OMP_SCALE #define OMP_SCALE 1 // tuned on core i7 #endif #ifdef SIMD_COEF_32 #include "simd-intrinsics.h" #define NBKEYS (SIMD_COEF_32*SIMD_PARA_SHA256) #define GETPOS(i,idx) ( (idx&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)idx/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) #define HASH_IDX_IN(idx) (((unsigned int)idx&(SIMD_COEF_32-1))+(unsigned 
int)idx/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32) #define HASH_IDX_OUT(idx) (((unsigned int)idx&(SIMD_COEF_32-1))+(unsigned int)idx/SIMD_COEF_32*8*SIMD_COEF_32) #define ALGORITHM_NAME "SHA256 " SHA256_ALGORITHM_NAME " AES" #define PLAINTEXT_LENGTH 28 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #else #define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR " AES" #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #include "memdbg.h" static struct fmt_tests sevenzip_tests[] = { /* CRC checks passes for this hash (4 bytes of padding) */ {"$7z$128$19$0$1122$8$a264c94f2cd72bec0000000000000000$725883103$112$108$64749c0963e20c74602379ca740165b9511204619859d1914819bc427b7e5f0f8fc67f53a0b53c114f6fcf4542a28e4a9d3914b4bc76baaa616d6a7ec9efc3f051cb330b682691193e6fa48159208329460c3025fb273232b82450645f2c12a9ea38b53a2331a1d0858813c8bf25a831", "openwall"}, /* LZMA before CRC (9 bytes of padding) */ {"$7z$1$19$0$1122$8$732b59fd26896e410000000000000000$2955316379$192$183$7544a3a7ec3eb99a33d80e57907e28fb8d0e140ec85123cf90740900429136dcc8ba0692b7e356a4d4e30062da546a66b92ec04c64c0e85b22e3c9a823abef0b57e8d7b8564760611442ecceb2ca723033766d9f7c848e5d234ca6c7863a2683f38d4605322320765938049305655f7fb0ad44d8781fec1bf7a2cb3843f269c6aca757e509577b5592b60b8977577c20aef4f990d2cb665de948004f16da9bf5507bf27b60805f16a9fcc4983208297d3affc4455ca44f9947221216f58c337f$232$5d00000100", "password"}, /* CRC checks passes for this hash (no padding) */ {"$7z$0$19$0$1122$8$d1f50227759415890000000000000000$1412385885$112$112$5e5b8b734adf52a64c541a5a5369023d7cccb78bd910c0092535dfb013a5df84ac692c5311d2e7bbdc580f5b867f7b5dd43830f7b4f37e41c7277e228fb92a6dd854a31646ad117654182253706dae0c069d3f4ce46121d52b6f20741a0bb39fc61113ce14d22f9184adafd6b5333fb1", "password"}, /* This requires LZMA (no padding) */ 
{"$7z$1$19$0$1122$8$5fdbec1569ff58060000000000000000$2465353234$112$112$58ba7606aafc7918e3db7f6e0920f410f61f01e9c1533c40850992fee4c5e5215bc6b4ea145313d0ac065b8ec5b47d9fb895bb7f97609be46107d71e219544cfd24b52c2ecd65477f72c466915dcd71b80782b1ac46678ab7f437fd9f7b8e9d9fad54281d252de2a7ae386a65fc69eda$176$5d00000100", "password"}, /* Length checks */ {"$7z$128$19$0$1122$8$94fb9024fdd3e6c40000000000000000$3965424295$112$99$1127828817ff126bc45ff3c5225d9d0c5d00a52094909674e6ed3dc431546d9a672738f2fa07556340d604d2efd2901b9d2ac2c0686c25af9c520c137b16c50c54df8703fd0b0606fa721ad70aafb9c4e3b288ef49864e6034021969b4ce11e3b8e269a92090ccf593c6a0da06262116", ""}, {"$7z$128$19$0$1122$8$6fd059d516d5490f0000000000000000$460747259$112$99$af163eb5532c557efca78fbb448aa04f348cd258c94233e6669f4e5025f220274c244d4f2347a7512571d9b6015a1e1a90e281983b743da957437b33092eddb55a5bc76f3ab6c7dbabb001578d1043285f5fa791fd94dd9779b461e44cbfe869f891007335b766774ccee3813ec8cd57", "&"}, {"$7z$128$19$0$1122$8$6d4a12af68d83bfe0000000000000000$993697592$112$99$7c308faa36b667599ee4418435ab621884c5c115ee3b70be454fe99236422f4f2d5cd9c8fcfbe6b6b0805ee602ce8488a08f7ea14a4f5c0c060fc685bff187720a402b23a5cfe3c9c5a5ae07f91209031b8f9804ac10459e15a0158031f6c58e507401ec6e1e6de8f64d94201159432b", "&'"}, {"$7z$128$19$0$1122$8$7527d758a59181830000000000000000$3917710544$112$99$61a9ca9e835bd0f2dc474b34d5d89bcf8cd1bb071a984ee1dcf224174a60bcee140fcf2fde8927fe4f3f4eb4a2cc39faff73f1898ae25cc92bd02939f4317ebb173bf3b6f01eef183163ddd533ad5c076f87341bd8b86d8460c68fc390aa8df89fc4076bdfd24e157f6c07e105c07612", "&'("}, {"$7z$128$19$0$1122$8$68928bade860a2b80000000000000000$3235890186$112$99$4b685a569c3aed78d217bae9ec64fa06b614df55c1cb0d160563d87efe38813accb38dd7037f86cebc91751c2488769c7398dfefaf491c024f2d640dcb388a56404cd5ac475ba16b5f8206fa45d5923b3a0c8dd0f24460ccee0d93bea03ad58b8a8db502a55ba1775560b3d194f342f7", "&'()"}, 
{"$7z$128$19$0$1122$8$81931b9ba0b069820000000000000000$3094344848$112$99$fdbb2622143d25b13992b1467ce9edce4e3df8ca07535735b76e8abcb0791e384a1d5547483e19c3bd6e5a0742d29c403cfc8b3a003b285e80b350ea9157600eb91c49b329903de9ec9b17d1c95b0e136b579e165a6e80550464fa99830bfd9ee58fc14516b614ff9f84ec80e6880a36", "&'()*"}, {"$7z$128$19$0$1122$8$ccf696913989510d0000000000000000$1238556212$112$99$647264fbc665e73ecfe3ef7055fef0d91cb86833d6df08b2f7a3c1c89cf7cdaa09a802c8bfb2e5c6b55143a315df74d841b349fc8b43613d0f87cc90325fd56fc17ee08df7ce76cdc9cda61bd4d5632e20af3db16e921c755174f291c0aa6581844def4547380e2dd4a574435d17e1e8", "&'()*+"}, {"$7z$128$19$0$1122$8$d618bd3ec8bafd800000000000000000$1349785$112$99$6514e2e7468e6f0ed63796cfc0588ac2d75f024c4a0fa03778bd252d316d03e48a08ffcc0011725ad4f867e9a9666630dff4f352c59bcbadb94b9d0e2c42d653b80f480005ce868a0b1a075b2e00abd743de0867d69cdc8b56c7f9770537d50e6bb11eb0d2d7d8b6af5dd8ecb50ab553", "&'()*+,"}, {"$7z$128$19$0$1122$8$1c1586d191f190890000000000000000$642253888$112$99$f55cf9ab802b10a83471abe9319711ae79906cd6921365167c389470a3a8a72b0d877379daae2c24ea2258e8586f12d5036aff9ddc8e26861467b0843ffb72e4410c2be76ec111d37f875c81b244ed172f1f4765a220d830a9615787e9d07f8582146556e9c566b64897a47d18a82b36", "&'()*+,-"}, #if DEBUG {"$7z$128$19$0$1122$8$0df03cbdbc73e22a0000000000000000$3194757927$112$99$df53e9d8b4e02cf2962ad87912021508a36910c399a7abc4a3a5423fa2184816af7172418eb4763924ec8b099b7ca95abdc6faac9aaa6e181ffa60b7e8bdb2bf576536ca69152e3b6b97302c796bbc9dec78db6ba7a4a58e68f8ee28f27dea26bd4f848dc3a3315e97e1463b5c171ce5", "&'()*+,-."}, {"$7z$128$19$0$1122$8$7785351cf9fe5dfa0000000000000000$1304801610$112$99$7b35280384726da8521fee0786ef43e0aa621394a6f015b65cbd7f1329f43c4543b8a451a0007c03a3ce3f61e639c54ede3e580600b113777822b6d562390d14ed236e5bac3d3af63ae23015148a95e7ccbc9eea653b52c606ca09ec51fd2b0c4cfc2b760fccc1fe0ccdd9ee3fcb8129", "&'()*+,-./"}, 
{"$7z$128$19$0$1122$8$70eb7f4b821cf5310000000000000000$3381356868$112$99$c26db2cb89df1237f323d92044726d03cfc7ba83115e789243c3b2570ae674d8356a23e004b103638b1ea9fe6ff5db844a1ddcaaed8a71a8d8e343f73868b4acafd34d493345439b0e0be87d2cf52eb4cceaafcff0dfaf9cf25080693ede267460320e1282b869a5f0b6c8789e769640", "&'()*+,-./0"}, {"$7z$128$19$0$1122$8$2ac0f1307794d8e10000000000000000$2871514580$112$99$4783d91fa72c377310654e961120e71ecdd27ec2e67366e83291daefcea03514ca9ecea031fcbd25c0759c1f242219e673cee093ef361664f18dacf85ca0620fd7092477ceeff7c548df0a475ce93278a564fe4ddb4ee2e4695cbe417a792e822204390ca5a530208a8ed51bc01f79e6", "&'()*+,-./01"}, {"$7z$128$19$0$1122$8$5bc4988c71cba8b70000000000000000$2815498089$112$99$0e4368dde66925e2bfac9a450291f8f817beaa891f08c4d2735d20b3147df581e2f3c53abfe2b0971186ac39280eb354ca5989f9043ad0288302d0ac59a3c8fa99d26c9619b81d22996f24eec1dba361afdd5e50060c2599a40a00c83c4ee0bc4ebe6e3126a64a743af95d9b22ee5867", "&'()*+,-./012"}, {"$7z$128$19$0$1122$8$33ab0ad513b7d6910000000000000000$107430285$112$99$f9f1195a4210eadc5b23f046f81c8cfaec3b90d8b6b67893f10bd9bedd0d859d0695bca5ce315cecbc2910dce27e4c1a1416675d841901c8d84846360b1919ebcba91143713c6b755758d3db64d39344da18222341818220cc43f3ee3a91cbc288f1aafe377b53def310d3b83d32aee3", "&'()*+,-./0123"}, {"$7z$128$19$0$1122$8$dd490a165c1b90f90000000000000000$2897354864$112$99$51efe41b67875503acebe2e199cb542a279520b468a61ba67b54612e317a84e95879a34eaad82124798f32c19f9c0786e8faaac768da5f6b2c91e3ba9f97a03a992c18b5b9b21a5f2b67ae9daeef37ec115f44bfb8b10ac3cb7862b6c024413a2ee801aa674df05e8b56bd8654f279f5", "&'()*+,-./01234"}, {"$7z$128$19$0$1122$8$9077cb191a5969b40000000000000000$3637063426$112$99$1e74746c59bdfe6b3f3d957493c9d5b92ba358f97e19d30e20443cb2fbac0501e07a162344ac7cf7cfa727c70a2bcf52593accc5c2c070c2331863ac76da5ad2f5de374292a87c6af67ab561f9cf71ae472ed1267d481c250f5b4d82d0ec0b2b8531db1fe4637c3f4e3a08de1b9b5418", "&'()*+,-./012345"}, 
{"$7z$128$19$0$1122$8$adc090d27b0343d30000000000000000$1147570982$112$99$ac14b9dc3751cfe6c1c719ceef3d73946fff2b0f924e06cd3177883df770e5505551bcf5598277801f46584a4f41530f50007c776d2bb91fd160148042275dfe4e420ff72244409f59c687a5bb2d0fc1bb29138689094fe40bb0f22785c63c631cd05abf4f7f3c9b6832e192e103d2f1", "&'()*+,-./0123456"}, {"$7z$128$19$0$1122$8$8dee69dc35517a2a0000000000000000$87427823$112$99$ea36cf8b577a0b5f31115f8550987f05f174b347a8a6433a08c013ecd816c8ecaad163c62db9bae6c57ace3c2a6ce0b36f78ad4723328cc022906400eed55e0e3685a5e8e6b369df780ee72f3d25ccd49d7f40d013052e080723dd4c0b1c75302c884ea956e3b6fd27261eb8c49dea51", "&'()*+,-./01234567"}, {"$7z$128$19$0$1122$8$200ce603d6f355f10000000000000000$3012105149$112$99$0ae42342f52172ad921178a25df3666e34e5a217d0afb3655088806f821d374bf522c197e59b131dbc574d4c936472f59f8892f69e47724ea52ecc5dc7d3ed734c557c9698a6f01519039714c065ad25008003c93cb7f694ee07267d5fcdebab5d149d5404023a0112faec2264d33ff6", "&'()*+,-./012345678"}, {"$7z$128$19$0$1122$8$a5007fc77fa5cc0b0000000000000000$1082728565$112$99$32c404c9633e9c61b76556e169695248008c51ca8f7f0f79c4a271ac6eb1d905a2622132f2f6988f9f3f5e375c592ec63d92d7b183b5801b149595ed440b23a083633de9f1cb5b6ac3238b7523b23141e686e6cbe9d4d3a28fc6489e902c17aeff6cd4cb516bef5cd5c6def78cb88ad4", "&'()*+,-./0123456789"}, {"$7z$128$19$0$1122$8$fd531c4e580be9a60000000000000000$1843420503$112$99$704289830b1add1c8ee6fd622ecf5b8da01988580bdb52f6269cc61c21838849d3a04299eaee15e0cae0eff9f6c3c82f71e434b3aa1c0ca824b90438c1c983130218acd128d9186e5dc2d19a8db602a0382cb60dadb4641b46fe532b799d29a4b882beaa9217f48ddccc99578617f8a0", "&'()*+,-./0123456789:"}, {"$7z$128$19$0$1122$8$7f94a95f71c1b0df0000000000000000$141406606$112$99$1a510a6fda9788b4f4b2274ea929044c00b61b23946bc417ead90ad64dcc9a55378f9ab74f7d693a5dcf455c00f82f6c2a885b664f4ab10c9969026714ce2773030f1c5872ca3948cd612e21b321826c2a561104d57a3ba2055f03aa9cc264821544ec4bccc41f4ac76aab97accb8f9c", "&'()*+,-./0123456789:;"}, 
{"$7z$128$19$0$1122$8$e24e93c7a9ebde080000000000000000$718561925$112$99$580bf36388526c932c22e3227b51774b6963a9c5b96fc8e2ac70a4302864fa88f50e7c00d9a79e0bca0f07a236e51200dc23435b7680e6fa99b19d790ac093af615a972f8b232686c21279234a2582f9714c5a1a2d326084158eba3e81b4f8ad40784d84baa8ddbed19f1c6603156d2c", "&'()*+,-./0123456789:;<"}, {"$7z$128$19$0$1122$8$6fbd519735b131710000000000000000$1248418560$112$99$cc9e3c97073d7fd37f04d4e6983b386e3ac00f6292dedb0f566dccf22cdbbb55fee8669edade383e96aa0a740e2b42aa7fddbe5831cac10828c624ee03a1a256c6e777c3d714c55296cb815c509a252b9426fe8d4566c944efe3fac5ea94910e55a390aef2c729a031e832c406049810", "&'()*+,-./0123456789:;<="}, {"$7z$128$19$0$1122$8$3ce1b899fc03d9c30000000000000000$1452122600$112$99$d4be60d5ab390713c7189f0dd808227c01f15f71fcf4bbccce6cb9238d6418c115eff59784d96ff8944575710a5799c7bcb761e8f1bfb7646a0e8fac3728ba4cca44fb82e5dd9f87bb26828566af64374b512fa094d35af8d743bded88b6257ec98a99b50dd225d4608b283bf035ac08", "&'()*+,-./0123456789:;<=>"}, {"$7z$128$19$0$1122$8$656e2285aabed25b0000000000000000$3885982465$112$99$77f2871e556e7f5278a9e896e91cd386ca8935128957d31fdce0603ea0e71c08b908a4c2d9f2d279757ced848be9482067c9d7935c88e5233aaa94a101d29908f7f015646758029d2078d25d0886bb9f0cdc0dd5136d72e90ceeea678564b199866dd8c9e5fe927102ee2dcf1cd4167f", "&'()*+,-./0123456789:;<=>?"}, {"$7z$128$19$0$1122$8$44ffefa48fa5a5b00000000000000000$1011653568$112$99$5d2504a1eb819218b9ad552e377d37e811ffccb64a554f404d982d209edfafb893b679cc881bbcbc606e67ffa055f712d7f140b554769511bc00321765830ea7c5db810fa2000ae7f4250b74aa61d881db66ae6f30e4c8e71887960c117b268d9934b8b5d52d4abdcb42b0e4ff40b805", "&'()*+,-./0123456789:;<=>?@"}, {"$7z$128$19$0$1122$8$b6e089dd0c52b6b80000000000000000$1229766981$112$99$49a8334d64d9cc7d710fe3b9c35f5d7cb0ec44d5db8a90966fbee93f85fdeeeca859c55519addb20c4628c9204dd24d1169b34dc53a2a685440fae7ed6748c172a8e9dcc42c8dffe60196818ad17a6f9314fcfd4d97cab3c18cf279df344e00fd04eaff32f29cbfcdb6832cfb69fe351", "&'()*+,-./0123456789:;<=>?@A"}, #endif /* DEBUG 
*/ {NULL} }; static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *saved_len; static int *cracked; static int new_keys; static int max_kpc; static unsigned char (*master)[32]; #ifdef SIMD_COEF_32 static uint32_t (*vec_in)[2][NBKEYS*16]; static uint32_t (*vec_out)[NBKEYS*8]; static int *indices; #endif static struct custom_salt { dyna_salt dsalt; size_t length; /* used in decryption */ size_t unpacksize; /* used in padding check */ size_t crc_len; /* used in CRC calculation */ int NumCyclesPower; int SaltSize; int ivSize; int type; unsigned char iv[16]; unsigned char salt[16]; unsigned int crc; unsigned char props[LZMA_PROPS_SIZE]; unsigned char data[1]; } *cur_salt; static void init(struct fmt_main *self) { CRC32_t crc; #if defined (_OPENMP) int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif // allocate 1 more slot to handle the tail of vector buffer max_kpc = self->params.max_keys_per_crypt + 1; saved_key = mem_calloc(max_kpc, sizeof(*saved_key)); saved_len = mem_calloc(max_kpc, sizeof(*saved_len)); cracked = mem_calloc(max_kpc, sizeof(*cracked)); #ifdef SIMD_COEF_32 vec_in = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*vec_in), MEM_ALIGN_CACHE); vec_out = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*vec_out), MEM_ALIGN_CACHE); #endif CRC32_Init(&crc); if (options.target_enc == UTF_8) self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); MEM_FREE(saved_len); MEM_FREE(master); #ifdef SIMD_COEF_32 MEM_FREE(vec_in); MEM_FREE(vec_out); MEM_FREE(indices); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int type, len, NumCyclesPower; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "$")) == NULL) goto 
err; if (strlen(p) > 3 || !isdec(p)) goto err; type = atoi(p); if (strlen(p) == 0 || type < 0 || type > 128) /* Compression type */ goto err; if (type > 2 && type != 128) /* none, LZMA or LZMA2 */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* NumCyclesPower */ goto err; if (strlen(p) > 2) goto err; if (!isdec(p)) goto err; NumCyclesPower = atoi(p); if (NumCyclesPower > 24 || NumCyclesPower < 1) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if (len > 16) /* salt length */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iv length */ goto err; if (strlen(p) > 2) goto err; if (!isdec(p)) goto err; len = atoi(p); if (len > 16) /* iv length */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iv */ goto err; if (!ishexlc(p)) goto err; if (strlen(p) / 2 > len && strcmp(p+len*2, "0000000000000000")) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* crc */ goto err; if (!isdecu(p) && !isdec_negok(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* data length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if ((p = strtokm(NULL, "$")) == NULL) /* unpacksize */ goto err; if (!isdec(p)) /* no way to validate, other than atoi() works for it */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* data */ goto err; if (strlen(p) / 2 != len) /* validates data_len atoi() */ goto err; if (!ishexlc(p)) goto err; if (type && type != 128) { if ((p = strtokm(NULL, "$")) == NULL) /* CRC len */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* Coder props */ goto err; if (!ishexlc(p)) goto err; if (type == 1 && strlen(p) != 10) goto err; else if (type == 2 && strlen(p) != 2) goto err; } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { struct custom_salt cs; struct custom_salt *psalt; static void *ptr; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; 
char *p; if (!ptr) ptr = mem_alloc_tiny(sizeof(struct custom_salt*), sizeof(struct custom_salt*)); memset(&cs, 0, sizeof(cs)); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "$"); cs.type = atoi(p); p = strtokm(NULL, "$"); cs.NumCyclesPower = atoi(p); p = strtokm(NULL, "$"); cs.SaltSize = atoi(p); p = strtokm(NULL, "$"); /* salt */ p = strtokm(NULL, "$"); cs.ivSize = atoi(p); p = strtokm(NULL, "$"); /* iv */ for (i = 0; i < cs.ivSize; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); /* crc */ cs.crc = atou(p); /* unsigned function */ p = strtokm(NULL, "$"); cs.length = atoll(p); psalt = malloc(sizeof(struct custom_salt) + cs.length - 1); memcpy(psalt, &cs, sizeof(cs)); p = strtokm(NULL, "$"); psalt->unpacksize = atoll(p); p = strtokm(NULL, "$"); /* data */ for (i = 0; i < psalt->length; i++) psalt->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; if (cs.type && cs.type != 128) { p = strtokm(NULL, "$"); /* CRC length */ psalt->crc_len = atoi(p); p = strtokm(NULL, "$"); /* Coder properties */ for (i = 0; p[i * 2] ; i++) psalt->props[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } MEM_FREE(keeptr); psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, length); psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, length, data, psalt->length); psalt->dsalt.salt_alloc_needs_free = 1; memcpy(ptr, &psalt, sizeof(void*)); return ptr; } static void set_salt(void *salt) { static int old_power; cur_salt = *((struct custom_salt**)salt); if (old_power != cur_salt->NumCyclesPower) { new_keys = 1; old_power = cur_salt->NumCyclesPower; } } static int salt_compare(const void *x, const void *y) { int c; const struct custom_salt *s1 = *((struct custom_salt**)x); const struct custom_salt *s2 = *((struct custom_salt**)y); // we had to make the salt order deterministic, so that intersalt-restore works if (s1->NumCyclesPower != s2->NumCyclesPower) return 
(s1->NumCyclesPower - s2->NumCyclesPower); c = memcmp(s1->salt, s2->salt, 16); if (c) return c; return memcmp(s1->iv, s2->iv, 16); } static void *SzAlloc(void *p, size_t size) { return mem_alloc(size); } static void SzFree(void *p, void *address) { MEM_FREE(address) }; static int sevenzip_decrypt(unsigned char *derived_key) { unsigned char *out = NULL; AES_KEY akey; unsigned char iv[16]; union { unsigned char crcc[4]; unsigned int crci; } _crc_out; unsigned char *crc_out = _crc_out.crcc; unsigned int ccrc; CRC32_t crc; int i; int nbytes, pad_size; size_t crc_len = cur_salt->unpacksize; size_t aes_len = cur_salt->crc_len ? (cur_salt->crc_len * 11 + 150) / 160 * 16 : crc_len; pad_size = nbytes = cur_salt->length - cur_salt->unpacksize; /* * Early rejection (only decrypt last 16 bytes). We don't seem to * be able to trust this, see #2532, so we only do it for truncated * hashes (it's the only thing we can do!). */ if ((cur_salt->type == 0x80 || TRUST_PADDING) && pad_size > 0 && cur_salt->length >= 32) { uint8_t buf[16]; memcpy(iv, cur_salt->data + cur_salt->length - 32, 16); AES_set_decrypt_key(derived_key, 256, &akey); AES_cbc_encrypt(cur_salt->data + cur_salt->length - 16, buf, 16, &akey, iv, AES_DECRYPT); i = 15; while (nbytes > 0) { if (buf[i] != 0) return 0; nbytes--; i--; } if (cur_salt->type == 0x80) /* We only have truncated data */ return 1; } /* Complete decryption, or partial if possible */ aes_len = nbytes ? 
cur_salt->length : MIN(aes_len, cur_salt->length); out = mem_alloc(aes_len); memcpy(iv, cur_salt->iv, 16); AES_set_decrypt_key(derived_key, 256, &akey); AES_cbc_encrypt(cur_salt->data, out, aes_len, &akey, iv, AES_DECRYPT); /* Padding check unless we already did the quick one */ if (TRUST_PADDING && nbytes) { i = cur_salt->length - 1; while (nbytes > 0) { if (out[i] != 0) goto exit_bad; nbytes--; i--; } } if (cur_salt->type == 0x80) /* We only have truncated data */ goto exit_good; /* Optional decompression before CRC */ if (cur_salt->type == 1) { ISzAlloc st_alloc = {SzAlloc, SzFree}; ELzmaStatus status; size_t in_size = aes_len; uint8_t *new_out; SRes rc; size_t out_size = cur_salt->crc_len; new_out = mem_alloc(out_size); if ((rc = LzmaDecode(new_out, &out_size, out, &in_size, cur_salt->props, LZMA_PROPS_SIZE, LZMA_FINISH_ANY, &status, &st_alloc)) == SZ_OK && out_size == cur_salt->crc_len) { MEM_FREE(out); out = new_out; crc_len = cur_salt->crc_len; } else { MEM_FREE(new_out); goto exit_bad; } } else if (cur_salt->type == 2) { Byte prop = cur_salt->props[0]; ISzAlloc st_alloc = {SzAlloc, SzFree}; ELzmaStatus status; size_t in_size = aes_len; uint8_t *new_out; SRes rc; size_t out_size = cur_salt->crc_len; new_out = mem_alloc(out_size); if ((rc = Lzma2Decode((Byte*)new_out, &out_size, out, &in_size, prop, LZMA_FINISH_ANY, &status, &st_alloc)) == SZ_OK && out_size == cur_salt->crc_len) { MEM_FREE(out); out = new_out; crc_len = cur_salt->crc_len; } else { MEM_FREE(new_out); goto exit_bad; } } /* CRC test */ CRC32_Init(&crc); CRC32_Update(&crc, out, crc_len); CRC32_Final(crc_out, crc); ccrc = _crc_out.crci; /* computed CRC */ #if !ARCH_LITTLE_ENDIAN ccrc = JOHNSWAP(ccrc); #endif if (ccrc == cur_salt->crc) goto exit_good; exit_bad: MEM_FREE(out); return 0; exit_good: MEM_FREE(out); return 1; } #ifdef SIMD_COEF_32 static void sevenzip_kdf(int buf_idx, int *indices, unsigned char *master) { int i, j; long long round, rounds = (long long) 1 << cur_salt->NumCyclesPower; 
uint32_t (*buf_in)[NBKEYS*16] = vec_in[buf_idx]; uint32_t *buf_out = vec_out[buf_idx]; int pw_len = saved_len[indices[0]]; int tot_len = (pw_len + 8)*rounds; int acc_len = 0; #if !ARCH_LITTLE_ENDIAN unsigned char temp[8] = { 0,0,0,0,0,0,0,0 }; #endif int cur_buf = 0; int fst_blk = 1; // it's assumed rounds is divisible by 64 for (round = 0; round < rounds; ++round) { // copy password to vector buffer for (i = 0; i < NBKEYS; ++i) { UTF16 *buf = saved_key[indices[i]]; for (j = 0; j < pw_len; ++j) { int len = acc_len + j; char *in = (char*)buf_in[(len & 64)>>6]; in[GETPOS(len%64, i)] = ((char*)buf)[j]; } for (j = 0; j < 8; ++j) { int len = acc_len + pw_len + j; char *in = (char*)buf_in[(len & 64)>>6]; #if ARCH_LITTLE_ENDIAN in[GETPOS(len%64, i)] = ((char*)&round)[j]; #else in[GETPOS(len%64, i)] = temp[j]; #endif } } #if !ARCH_LITTLE_ENDIAN for (j = 0; j < 8; j++) if (++(temp[j]) != 0) break; #endif acc_len += (pw_len + 8); // swap out and compute digest on the filled buffer if ((acc_len & 64) != (cur_buf << 6)) { if (fst_blk) SIMDSHA256body(buf_in[cur_buf], buf_out, NULL, SSEi_MIXED_IN); else SIMDSHA256body(buf_in[cur_buf], buf_out, buf_out, SSEi_MIXED_IN | SSEi_RELOAD); fst_blk = 0; cur_buf = 1 - cur_buf; } } // padding memset(buf_in[0], 0, sizeof(buf_in[0])); for (i = 0; i < NBKEYS; ++i) { buf_in[0][HASH_IDX_IN(i)] = (0x80U << 24); buf_in[0][HASH_IDX_IN(i) + 15*SIMD_COEF_32] = tot_len*8; } SIMDSHA256body(buf_in[0], buf_out, buf_out, SSEi_MIXED_IN | SSEi_RELOAD); // copy out result for (i = 0; i < NBKEYS; ++i) { uint32_t *m = (uint32_t*)&master[i*32]; for (j = 0; j < 32/4; ++j) m[j] = JOHNSWAP(buf_out[HASH_IDX_OUT(i) + j*SIMD_COEF_32]); } } #else static void sevenzip_kdf(int index, unsigned char *master) { long long rounds = (long long) 1 << cur_salt->NumCyclesPower; long long round; #if !ARCH_LITTLE_ENDIAN int i; unsigned char temp[8] = { 0,0,0,0,0,0,0,0 }; #endif SHA256_CTX sha; /* kdf */ SHA256_Init(&sha); for (round = 0; round < rounds; round++) { if 
(cur_salt->SaltSize) SHA256_Update(&sha, cur_salt->salt, cur_salt->SaltSize); SHA256_Update(&sha, (char*)saved_key[index], saved_len[index]); #if ARCH_LITTLE_ENDIAN SHA256_Update(&sha, (char*)&round, 8); #else SHA256_Update(&sha, temp, 8); for (i = 0; i < 8; i++) if (++(temp[i]) != 0) break; #endif } SHA256_Final(master, &sha); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef SIMD_COEF_32 static int tot_todo; int len; /* Tricky formula, see GitHub #1692 :-) */ if (!indices) indices = mem_alloc((max_kpc + MIN(PLAINTEXT_LENGTH + 1, max_kpc) * (NBKEYS - 1)) * sizeof(int)); if (!master) master = mem_alloc((max_kpc + MIN(PLAINTEXT_LENGTH + 1, max_kpc) * (NBKEYS - 1)) * sizeof(*master)); #else if (!master) master = mem_alloc(max_kpc * sizeof(*master)); #endif #ifdef SIMD_COEF_32 if (new_keys) { // sort passwords by length tot_todo = 0; for (len = 0; len <= PLAINTEXT_LENGTH*2; len += 2) { for (index = 0; index < count; ++index) { if (saved_len[index] == len) indices[tot_todo++] = index; } while (tot_todo % NBKEYS) indices[tot_todo++] = count; } } #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < tot_todo; index += NBKEYS) { int j; if (new_keys) sevenzip_kdf(index/NBKEYS, indices + index, master[index]); /* do decryption and checks */ for (j = 0; j < NBKEYS; ++j) { cracked[indices[index + j]] = sevenzip_decrypt(master[index + j]); } } #else #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { /* derive key */ if (new_keys) sevenzip_kdf(index, master[index]); /* do decryption and checks */ cracked[index] = sevenzip_decrypt(master[index]); } #endif // SIMD_COEF_32 new_keys = 0; return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, 
int index) { return 1; } static void sevenzip_set_key(char *key, int index) { /* Convert key to utf-16-le format (--encoding aware) */ int len; len = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key)); if (len <= 0) { key[-len] = 0; // match truncation len = strlen16(saved_key[index]); } len *= 2; saved_len[index] = len; new_keys = 1; } static char *get_key(int index) { return (char*)utf16_to_enc(saved_key[index]); } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = *((struct custom_salt **)salt); return (unsigned int)(1 << my_salt->NumCyclesPower); } static unsigned int padding_size(void *salt) { struct custom_salt *my_salt; my_salt = *((struct custom_salt **)salt); return my_salt->length - my_salt->unpacksize; } static unsigned int compression_type(void *salt) { struct custom_salt *my_salt; my_salt = *((struct custom_salt **)salt); return my_salt->type; } struct fmt_main fmt_sevenzip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_DYNA_SALT | FMT_HUGE_INPUT, { "iteration count", "padding size", "compression type", }, { FORMAT_TAG }, sevenzip_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { iteration_count, padding_size, compression_type, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, salt_compare, set_salt, sevenzip_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_unop__cosh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__cosh_fc64_fc64
// op(A') function:  GB_unop_tran__cosh_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = ccosh (aij)
// (ccosh is the C99 double-complex hyperbolic cosine from <complex.h>)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ccosh (x) ;

// casting (no-op here: input and output types are both GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GxB_FC64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */     \
    GxB_FC64_t z = aij ;                \
    Cx [pC] = ccosh (z) ;               \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COSH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise Cx [p] = ccosh (Ax [p]) over anz entries, parallelized with
// a static OpenMP schedule.  Cx and Ax may be aliased (pure element-wise).
GrB_Info GB_unop_apply__cosh_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = aij ;
        Cx [p] = ccosh (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose worker is generated by including GB_unop_transpose.c with the
// macros defined above; this function only supplies the type-specific body.
GrB_Info GB_unop_tran__cosh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lapl_ss.c
#include <xmmintrin.h>
#include "defs.h"

/*
 * Convert to super-site packed format.
 *
 * Each packed element ssarr[x + (Lx/4)*y] gathers the four sites of row y
 * whose x-coordinates are congruent modulo Lx/4 (stride-Lx/4 interleave),
 * so one __m128 load fetches four sites at once in the compute kernel.
 */
void to_supersite(supersite *ssarr, float *arr)
{
	for(int y=0; y<Ly; y++)
		for(int x=0; x<Lx/4; x++) {
			int vv = x + (Lx/4)*y;
			int v = x + (Lx)*y;
			ssarr[vv].site4[0] = arr[v+0*Lx/4];
			ssarr[vv].site4[1] = arr[v+1*Lx/4];
			ssarr[vv].site4[2] = arr[v+2*Lx/4];
			ssarr[vv].site4[3] = arr[v+3*Lx/4];
		}
	return;
}

/*
 * Convert from super-site packed format (exact inverse of to_supersite()).
 */
void from_supersite(float *arr, supersite *ssarr)
{
	for(int y=0; y<Ly; y++)
		for(int x=0; x<Lx/4; x++) {
			int vv = x + (Lx/4)*y;
			int v = x + (Lx)*y;
			arr[v+0*Lx/4] = ssarr[vv].site4[0];
			arr[v+1*Lx/4] = ssarr[vv].site4[1];
			arr[v+2*Lx/4] = ssarr[vv].site4[2];
			arr[v+3*Lx/4] = ssarr[vv].site4[3];
		}
	return;
}

/*
 * Single iteration of lapl equation on super-site packed arrays.
 * Super-site packing helps in vectorization.
 *
 * out = norm*in + delta*(sum of 4 neighbors), with periodic boundaries in
 * both directions.  The y-direction wraps via the (y±1)%Ly index; the
 * x-direction wraps by rotating the packed lanes with _mm_shuffle_ps at the
 * two x-boundaries (the bulk loop needs no shuffle because the x-neighbor
 * lives in an adjacent packed element with identical lane order).
 */
void lapl_iter_supersite(supersite *out, float sigma, supersite *in)
{
#pragma omp parallel firstprivate(out, in, sigma)
	{
		/* Jacobi coefficients: out = (in + sigma*neighbors) / (1+4*sigma) */
		float delta = sigma / (1+4*sigma);
		float norm = 1./(1+4*sigma);
		__m128 vnorm = _mm_load1_ps(&norm);
		__m128 vdelta = _mm_load1_ps(&delta);

		/* Do lapl iteration on volume, ommiting boundaries in x-direction */
#pragma omp for nowait
		for(int y=0; y<Ly; y++)
			for(int x=1; x<Lx/4-1; x++) {
				int lx = Lx/4;
				int v00 = x+y*lx;
				int v0p = v00+1;
				int v0m = v00-1;
				int vp0 = x + ((y+1)%Ly)*lx;
				int vm0 = x + ((Ly+(y-1))%Ly)*lx;
				__m128 in00 = _mm_load_ps(&in[v00].site4[0]);
				__m128 in0p = _mm_load_ps(&in[v0p].site4[0]);
				__m128 in0m = _mm_load_ps(&in[v0m].site4[0]);
				__m128 inp0 = _mm_load_ps(&in[vp0].site4[0]);
				__m128 inm0 = _mm_load_ps(&in[vm0].site4[0]);
				__m128 hop = _mm_add_ps(inm0, inp0);
				hop = _mm_add_ps(hop, in0p);
				hop = _mm_add_ps(hop, in0m);
				hop = _mm_mul_ps(hop, vdelta);
				__m128 dia = _mm_mul_ps(vnorm, in00);
				hop = _mm_add_ps(dia, hop);
				_mm_store_ps(&out[v00].site4[0], hop);
			}

		/* Do lapl iteration on x = 0 boundary sites */
#pragma omp for nowait
		for(int y=0; y<Ly; y++) {
			int lx = Lx/4;
			int x = 0;
			int v00 = x+y*lx;
			int v0p = v00+1;
			int v0m = lx-1+y*lx;        /* -x neighbor wraps to last packed element */
			int vp0 = x + ((y+1)%Ly)*lx;
			int vm0 = x + ((Ly+(y-1))%Ly)*lx;
			__m128 in00 = _mm_load_ps(&in[v00].site4[0]);
			__m128 in0p = _mm_load_ps(&in[v0p].site4[0]);
			__m128 in0m = _mm_load_ps(&in[v0m].site4[0]);
			/* rotate lanes right so each lane sees its periodic -x neighbor */
			in0m = _mm_shuffle_ps(in0m, in0m, _MM_SHUFFLE(2,1,0,3));
			__m128 inp0 = _mm_load_ps(&in[vp0].site4[0]);
			__m128 inm0 = _mm_load_ps(&in[vm0].site4[0]);
			__m128 hop = _mm_add_ps(inm0, inp0);
			hop = _mm_add_ps(hop, in0p);
			hop = _mm_add_ps(hop, in0m);
			hop = _mm_mul_ps(hop, vdelta);
			__m128 dia = _mm_mul_ps(vnorm, in00);
			hop = _mm_add_ps(dia, hop);
			_mm_store_ps(&out[v00].site4[0], hop);
		}

		/* Do lapl iteration on x = Lx-1 boundary sites */
#pragma omp for nowait
		for(int y=0; y<Ly; y++) {
			int lx = Lx/4;
			int x = lx-1;
			int v00 = x+y*lx;
			int v0p = y*lx;             /* +x neighbor wraps to first packed element */
			int v0m = v00-1;
			int vp0 = x + ((y+1)%Ly)*lx;
			int vm0 = x + ((Ly+(y-1))%Ly)*lx;
			__m128 in00 = _mm_load_ps(&in[v00].site4[0]);
			__m128 in0p = _mm_load_ps(&in[v0p].site4[0]);
			/* rotate lanes left so each lane sees its periodic +x neighbor */
			in0p = _mm_shuffle_ps(in0p, in0p, _MM_SHUFFLE(0,3,2,1));
			__m128 in0m = _mm_load_ps(&in[v0m].site4[0]);
			__m128 inp0 = _mm_load_ps(&in[vp0].site4[0]);
			__m128 inm0 = _mm_load_ps(&in[vm0].site4[0]);
			__m128 hop = _mm_add_ps(inm0, inp0);
			hop = _mm_add_ps(hop, in0p);
			hop = _mm_add_ps(hop, in0m);
			hop = _mm_mul_ps(hop, vdelta);
			__m128 dia = _mm_mul_ps(vnorm, in00);
			hop = _mm_add_ps(dia, hop);
			_mm_store_ps(&out[v00].site4[0], hop);
		}
	}
}
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major:
    // C^T = (A*B)^T = B^T * A^T, swapping roles (and storage orders) of lhs/rhs
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  // res += alpha * lhs * rhs, computed by cache-blocked panel products.
  // When info != 0 this is one worker of a multi-threaded GEMM; the workers
  // synchronize through info[] (see comments below).
  static void run(Index rows, Index cols, Index depth,
    const LhsScalar* _lhs, Index lhsStride,
    const RhsScalar* _rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<LhsScalar,RhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);

    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    //Index nc = blocking.nc(); // cache block size along the N direction

    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();

      std::size_t sizeA = kc*mc;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      // blockA and w are per-thread scratch; blockB is shared by all threads
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
      ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);

      RhsScalar* blockB = blocking.blockB();
      eigen_internal_assert(blockB!=0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing A'.
        pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);

        // Pack B_k to B' in a parallel fashion:
        // each thread packs the sub block B_k,j to B'_j where j is the thread id.

        // However, before copying to B'_j, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        while(info[tid].users!=0) {}
        info[tid].users += threads;

        pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);

        // Notify the other threads that the part B'_j is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per B'_j
        // (starting from our own j avoids all threads hammering the same slice)
        for(Index shift=0; shift<threads; ++shift)
        {
          Index j = (tid+shift)%threads;

          // At this point we have to make sure that B'_j has been updated by the thread j,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if(shift>0)
            while(info[j].sync!=k) {}

          gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
        }

        // Then keep going as usual with the remaining A'
        for(Index i=mc; i<rows; i+=mc)
        {
          const Index actual_mc = (std::min)(i+mc,rows)-i;

          // pack A_i,k to A'
          pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);

          // C_i += A' * B'
          gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
        }

        // Release all the sub blocks B'_j of B' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index j=0; j<threads; ++j)
          #pragma omp atomic
          (info[j].users) -= 1;
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*cols;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;

      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      // (==GEMM_VAR1)
      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
        // Note that this panel will be read as many times as the number of blocks in the lhs's
        // vertical panel which is, in practice, a very low number.
        pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);

        // For each mc x kc block of the lhs's vertical panel...
        // (==GEPP_VAR1)
        for(Index i2=0; i2<rows; i2+=mc)
        {
          const Index actual_mc = (std::min)(i2+mc,rows)-i2;

          // We pack the lhs's block into a sequential chunk of memory (L1 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
          pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);

          // Everything is packed, we can now call the block * panel kernel:
          gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
        }
      }
    }
  }
};

/*********************************************************************************
*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};

// Functor passed to parallelize_gemm: each invocation computes one horizontal
// strip [row, row+rows) x [col, col+cols) of the destination.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // allocate the shared B block before spawning the parallel workers
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }

  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Base class holding the cache-block sizes (mc, nc, kc) and the three packing
// buffers (A block, B block, workspace).  Buffers are owned by subclasses.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;
    RhsScalar* m_blockW;

    DenseIndex m_mc;
    DenseIndex m_nc;
    DenseIndex m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};

// Fixed-size case: all dimensions known at compile time, so the packing
// buffers are static members and the allocate*() methods are no-ops.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };

    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];

  public:

    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};

// Dynamic-size case: block sizes are computed at run time and the packing
// buffers are heap-allocated lazily (allocateA/B/W), freed in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    DenseIndex m_sizeA;
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;

  public:

    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};

} // end namespace internal

template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)

    typedef typename  Lhs::Scalar LhsScalar;
    typedef typename  Rhs::Scalar RhsScalar;
    typedef           Scalar      ResScalar;

    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
      typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
    }

    // dst += alpha * lhs * rhs, dispatching to the blocked (and possibly
    // parallel) GEMM kernel above.
    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
    {
      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());

      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

      // fold any scalar multiples wrapped around lhs/rhs into alpha
      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);

      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

      typedef internal::gemm_functor<
        Scalar, Index,
        internal::general_matrix_matrix_product<
          Index,
          LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;

      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());

      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
GB_AxB_dot4_template.c
//------------------------------------------------------------------------------
// GB_AxB_dot4: C+=A'*B via dot products, where C is dense
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// C+=A'*B where C is a dense matrix and computed in-place.  The monoid of the
// semiring matches the accum operator, and the type of C matches the ztype of
// accum.  That is, no typecasting can be done with C.

// The PAIR operator as the multiplier provides important special cases.
// See Template/GB_AxB_dot_cij.c for details.

// NOTE(review): this is a template fragment, included with GB_GETA, GB_GETB,
// GB_GETC, GB_PUTC, GB_MULTADD, etc. defined by the including translation
// unit — it is not compilable on its own.

// cij += A(k,i) * B(k,j)
// Loads cij from C lazily (only on the first matched pair), then advances
// both merge pointers.
#undef  GB_DOT_MERGE
#define GB_DOT_MERGE                                                \
{                                                                   \
    if (!cij_updated)                                               \
    {                                                               \
        cij_updated = true ;                                        \
        GB_GETC (cij, pC) ;                                         \
    }                                                               \
    GB_GETA (aki, Ax, pA) ;         /* aki = A(k,i) */              \
    GB_GETB (bkj, Bx, pB) ;         /* bkj = B(k,j) */              \
    GB_MULTADD (cij, aki, bkj) ;    /* cij += aki * bkj */          \
    GB_DOT_TERMINAL (cij) ;         /* break if cij == terminal */  \
    pA++ ;                                                          \
    pB++ ;                                                          \
}

{

    //--------------------------------------------------------------------------
    // get A, B, and C
    //--------------------------------------------------------------------------

    GB_CTYPE *GB_RESTRICT Cx = C->x ;
    const int64_t cvlen = C->vlen ;

    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bh = B->h ;
    const int64_t *GB_RESTRICT Bi = B->i ;
    const GB_BTYPE *GB_RESTRICT Bx = B_is_pattern ? NULL : B->x ;
    const int64_t bvlen = B->vlen ;

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const GB_ATYPE *GB_RESTRICT Ax = A_is_pattern ? NULL : A->x ;

    ASSERT (A->vlen == B->vlen) ;

    // 2D task grid: naslice slices of A's vectors x nbslice slices of B's
    int ntasks = naslice * nbslice ;

    //--------------------------------------------------------------------------
    // C += A'*B
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the entries in A and B to compute
        //----------------------------------------------------------------------

        int a_taskid = taskid / nbslice ;
        int b_taskid = taskid % nbslice ;

        int64_t akfirst = A_slice [a_taskid] ;
        int64_t aklast  = A_slice [a_taskid+1] ;
        if (akfirst >= aklast) continue ;

        int64_t bkfirst = B_slice [b_taskid] ;
        int64_t bklast  = B_slice [b_taskid+1] ;
        if (bkfirst >= bklast) continue ;

        //----------------------------------------------------------------------
        // C+=A'*B via dot products
        //----------------------------------------------------------------------

        for (int64_t bk = bkfirst ; bk < bklast ; bk++)
        {

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            int64_t j = (Bh == NULL) ? bk : Bh [bk] ;
            int64_t pB_start = Bp [bk] ;
            int64_t pB_end   = Bp [bk+1] ;
            int64_t pC_start = j * cvlen ;      // C is dense, held by column
            int64_t bjnz = pB_end - pB_start ;
            if (bjnz == 0) continue ;

            if (bjnz == bvlen)
            {

                //--------------------------------------------------------------
                // B(:,j) is dense
                //--------------------------------------------------------------

                for (int64_t ak = akfirst ; ak < aklast ; ak++)
                {

                    //----------------------------------------------------------
                    // get A(:,i)
                    //----------------------------------------------------------

                    int64_t i = (Ah == NULL) ? ak : Ah [ak] ;
                    int64_t pA     = Ap [ak] ;
                    int64_t pA_end = Ap [ak+1] ;
                    int64_t ainz = pA_end - pA ;
                    if (ainz == 0) continue ;

                    GB_CIJ_DECLARE (cij) ;          // declare the cij scalar
                    int64_t pC = i + pC_start ;     // C(i,j) is at Cx [pC]
                    int64_t pB = pB_start ;
                    GB_GETC (cij, pC) ;             // cij = Cx [pC]

                    //----------------------------------------------------------
                    // special cases for the PAIR multiplier
                    //----------------------------------------------------------

                    // Since B(:,j) is dense, C(i,j) += A(:,i)'*B(:,j) is
                    // trivial to compute with the PAIR multiplier.

                    #if GB_IS_PAIR_MULTIPLIER

                        #if GB_IS_ANY_MONOID
                        // ANY monoid: take the first entry found
                        cij = 1 ;
                        #elif GB_IS_EQ_MONOID
                        // A(:,i)'*B(:j) is one, so this result must be
                        // accumulated into cij, as cij += 1, where the
                        // accumulator is the EQ operator.
                        cij = (cij == 1) ;
                        #elif (GB_CTYPE_BITS > 0)
                        // PLUS, XOR monoids: A(:,i)'*B(:,j) is nnz(A(:,i)),
                        // for bool, 8-bit, 16-bit, or 32-bit integer
                        uint64_t t = ((uint64_t) cij) + ainz ;
                        cij = (GB_CTYPE) (t & GB_CTYPE_BITS) ;
                        #else
                        // PLUS monoid for float, double, or 64-bit integers
                        cij += (GB_CTYPE) ainz ;
                        #endif

                    #else

                        //------------------------------------------------------
                        // general case
                        //------------------------------------------------------

                        if (ainz == bvlen)
                        {

                            //--------------------------------------------------
                            // both A(:,i) and B(:,j) are dense
                            //--------------------------------------------------

                            GB_PRAGMA_VECTORIZE_DOT
                            for (int64_t k = 0 ; k < bvlen ; k++)
                            {
                                GB_DOT_TERMINAL (cij) ;   // break if terminal
                                // cij += A(k,i) * B(k,j)
                                GB_GETA (aki, Ax, pA+k) ;   // aki = A(k,i)
                                GB_GETB (bkj, Bx, pB+k) ;   // bkj = B(k,j)
                                GB_MULTADD (cij, aki, bkj) ; // cij += aki * bkj
                            }
                        }
                        else
                        {

                            //--------------------------------------------------
                            // A(:,i) is sparse and B(:,j) is dense
                            //--------------------------------------------------

                            GB_PRAGMA_VECTORIZE_DOT
                            for (int64_t p = pA ; p < pA_end ; p++)
                            {
                                GB_DOT_TERMINAL (cij) ;   // break if terminal
                                int64_t k = Ai [p] ;
                                // cij += A(k,i) * B(k,j)
                                GB_GETA (aki, Ax, p   ) ;   // aki = A(k,i)
                                GB_GETB (bkj, Bx, pB+k) ;   // bkj = B(k,j)
                                GB_MULTADD (cij, aki, bkj) ; // cij += aki * bkj
                            }
                        }

                    #endif
                    GB_PUTC (cij, pC) ;                 // Cx [pC] = cij
                }

            }
            else
            {

                //--------------------------------------------------------------
                // B(:,j) is sparse
                //--------------------------------------------------------------

                // get the first and last index in B(:,j)
                int64_t ib_first = Bi [pB_start] ;
                int64_t ib_last  = Bi [pB_end-1] ;

                for (int64_t ak = akfirst ; ak < aklast ; ak++)
                {

                    //----------------------------------------------------------
                    // get A(:,i)
                    //----------------------------------------------------------

                    int64_t i = (Ah == NULL) ? ak : Ah [ak] ;
                    int64_t pA     = Ap [ak] ;
                    int64_t pA_end = Ap [ak+1] ;
                    int64_t ainz = pA_end - pA ;
                    if (ainz == 0) continue ;

                    // quick reject: index ranges of A(:,i) and B(:,j) disjoint
                    if (Ai [pA_end-1] < ib_first || ib_last < Ai [pA]) continue;

                    //----------------------------------------------------------
                    // C(i,j) += A(:,i)'*B(:,j)
                    //----------------------------------------------------------

                    GB_CIJ_DECLARE (cij) ;          // declare the cij scalar
                    int64_t pC = i + pC_start ;     // C(i,j) is at Cx [pC]
                    int64_t pB = pB_start ;

                    if (ainz == bvlen)
                    {

                        //------------------------------------------------------
                        // A(:,i) is dense and B(:,j) is sparse
                        //------------------------------------------------------

                        GB_GETC (cij, pC) ;             // cij = Cx [pC]

                        #if GB_IS_PAIR_MULTIPLIER

                            #if GB_IS_ANY_MONOID
                            // ANY monoid: take the first entry found
                            cij = 1 ;
                            #elif GB_IS_EQ_MONOID
                            // A(:,i)'*B(:j) is one, so this result must be
                            // accumulated into cij, as cij += 1, where the
                            // accumulator is the EQ operator.
                            cij = (cij == 1) ;
                            #elif (GB_CTYPE_BITS > 0)
                            // PLUS, XOR monoids: A(:,i)'*B(:,j) is nnz(A(:,i)),
                            // for bool, 8-bit, 16-bit, or 32-bit integer
                            uint64_t t = ((uint64_t) cij) + bjnz ;
                            cij = (GB_CTYPE) (t & GB_CTYPE_BITS) ;
                            #else
                            // PLUS monoid for float, double, or 64-bit integers
                            cij += (GB_CTYPE) bjnz ;
                            #endif

                        #else

                            GB_PRAGMA_VECTORIZE_DOT
                            for (int64_t p = pB ; p < pB_end ; p++)
                            {
                                GB_DOT_TERMINAL (cij) ;   // break if terminal
                                int64_t k = Bi [p] ;
                                // cij += A(k,i) * B(k,j)
                                GB_GETA (aki, Ax, pA+k) ;   // aki = A(k,i)
                                GB_GETB (bkj, Bx, p   ) ;   // bkj = B(k,j)
                                GB_MULTADD (cij, aki, bkj) ; // cij += aki*bkj
                            }

                        #endif
                        GB_PUTC (cij, pC) ;             // Cx [pC] = cij

                    }
                    else if (ainz > 8 * bjnz)
                    {

                        //------------------------------------------------------
                        // B(:,j) is very sparse compared to A(:,i)
                        //------------------------------------------------------

                        // merge with binary search to skip runs of A entries
                        bool cij_updated = false ;
                        while (pA < pA_end && pB < pB_end)
                        {
                            int64_t ia = Ai [pA] ;
                            int64_t ib = Bi [pB] ;
                            if (ia < ib)
                            {
                                // A(ia,i) appears before B(ib,j)
                                // discard all entries A(ia:ib-1,i)
                                int64_t pleft = pA + 1 ;
                                int64_t pright = pA_end - 1 ;
                                GB_TRIM_BINARY_SEARCH (ib, Ai, pleft, pright) ;
                                ASSERT (pleft > pA) ;
                                pA = pleft ;
                            }
                            else if (ib < ia)
                            {
                                // B(ib,j) appears before A(ia,i)
                                pB++ ;
                            }
                            else // ia == ib == k
                            {
                                // A(k,i) and B(k,j) are next entries to merge
                                GB_DOT_MERGE ;
                            }
                        }
                        if (cij_updated) GB_PUTC (cij, pC) ;

                    }
                    else if (bjnz > 8 * ainz)
                    {

                        //------------------------------------------------------
                        // A(:,i) is very sparse compared to B(:,j)
                        //------------------------------------------------------

                        // merge with binary search to skip runs of B entries
                        bool cij_updated = false ;
                        while (pA < pA_end && pB < pB_end)
                        {
                            int64_t ia = Ai [pA] ;
                            int64_t ib = Bi [pB] ;
                            if (ia < ib)
                            {
                                // A(ia,i) appears before B(ib,j)
                                pA++ ;
                            }
                            else if (ib < ia)
                            {
                                // B(ib,j) appears before A(ia,i)
                                // discard all entries B(ib:ia-1,j)
                                int64_t pleft = pB + 1 ;
                                int64_t pright = pB_end - 1 ;
                                GB_TRIM_BINARY_SEARCH (ia, Bi, pleft, pright) ;
                                ASSERT (pleft > pB) ;
                                pB = pleft ;
                            }
                            else // ia == ib == k
                            {
                                // A(k,i) and B(k,j) are next entries to merge
                                GB_DOT_MERGE ;
                            }
                        }
                        if (cij_updated) GB_PUTC (cij, pC) ;

                    }
                    else
                    {

                        //------------------------------------------------------
                        // A(:,i) and B(:,j) have about the same sparsity
                        //------------------------------------------------------

                        // plain linear-time merge of the two index lists
                        bool cij_updated = false ;
                        while (pA < pA_end && pB < pB_end)
                        {
                            int64_t ia = Ai [pA] ;
                            int64_t ib = Bi [pB] ;
                            if (ia < ib)
                            {
                                // A(ia,i) appears before B(ib,j)
                                pA++ ;
                            }
                            else if (ib < ia)
                            {
                                // B(ib,j) appears before A(ia,i)
                                pB++ ;
                            }
                            else // ia == ib == k
                            {
                                // A(k,i) and B(k,j) are the entries to merge
                                GB_DOT_MERGE ;
                            }
                        }
                        if (cij_updated) GB_PUTC (cij, pC) ;
                    }
                }
            }
        }
    }
}
distance_calcuation_utility.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Aditya Ghantasala #if !defined(CHIMERA_DISTANCE_CALCULATION_UTILITY ) #define CHIMERA_DISTANCE_CALCULATION_UTILITY // System includes // External includes // Project includes #include "includes/define.h" #include "processes/variational_distance_calculation_process.h" #include "utilities/parallel_levelset_distance_calculator.h" #include "processes/calculate_distance_to_skin_process.h" #include "utilities/variable_utils.h" namespace Kratos { ///@name Kratos Classes ///@{ /// Utility for calculating the Distance on a given modelpart template <int TDim> class KRATOS_API(CHIMERA_APPLICATION) ChimeraDistanceCalculationUtility { public: ///@name Type Definitions ///@{ /// Pointer definition of ChimeraDistanceCalculationUtility KRATOS_CLASS_POINTER_DEFINITION(ChimeraDistanceCalculationUtility); ///@} ///@name Life Cycle ///@{ /// Default constructor. ChimeraDistanceCalculationUtility() = delete; /// Destructor. /// Deleted copy constructor ChimeraDistanceCalculationUtility(const ChimeraDistanceCalculationUtility& rOther) = delete; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Calculates distance on the whole of rBackgroundModelPart from rSkinModelPart * @param rBackgroundModelPart The background modelpart where distances are calculated. 
* @param rSkinModelPart The skin modelpart from where the distances are calculated */ static inline void CalculateDistance(ModelPart &rBackgroundModelPart, ModelPart &rSkinModelPart) { typedef CalculateDistanceToSkinProcess<TDim> CalculateDistanceToSkinProcessType; const int nnodes = static_cast<int>(rBackgroundModelPart.NumberOfNodes()); #pragma omp parallel for for (int i_node = 0; i_node < nnodes; ++i_node) { auto it_node = rBackgroundModelPart.NodesBegin() + i_node; it_node->FastGetSolutionStepValue(DISTANCE, 0) = 0.0; it_node->FastGetSolutionStepValue(DISTANCE, 1) = 0.0; it_node->SetValue(DISTANCE, 0.0); } CalculateDistanceToSkinProcessType(rBackgroundModelPart, rSkinModelPart).Execute(); unsigned int max_level = 100; double max_distance = 200; auto p_distance_smoother = Kratos::make_shared<ParallelDistanceCalculator<TDim>>(); p_distance_smoother->CalculateDistances(rBackgroundModelPart, DISTANCE, NODAL_AREA, max_level, max_distance); VariableUtils().CopyScalarVar(DISTANCE, CHIMERA_DISTANCE, rBackgroundModelPart.Nodes()); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ ///@} }; // Class ChimeraDistanceCalculationUtility ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos. #endif // DISTANCE_CALCULATION_UTILITY defined
semantics.c
/* Perform the semantic phase of parsing, i.e., the process of building tree structure, checking semantic consistency, and building RTL. These routines are used both during actual parsing and during the instantiation of template functions. Copyright (C) 1998-2020 Free Software Foundation, Inc. Written by Mark Mitchell (mmitchell@usa.net) based on code found formerly in parse.y and pt.c. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "target.h" #include "bitmap.h" #include "cp-tree.h" #include "stringpool.h" #include "cgraph.h" #include "stmt.h" #include "varasm.h" #include "stor-layout.h" #include "c-family/c-objc.h" #include "tree-inline.h" #include "intl.h" #include "tree-iterator.h" #include "omp-general.h" #include "convert.h" #include "stringpool.h" #include "attribs.h" #include "gomp-constants.h" #include "predict.h" #include "memmodel.h" /* There routines provide a modular interface to perform many parsing operations. They may therefore be used during actual parsing, or during template instantiation, which may be regarded as a degenerate form of parsing. */ static tree maybe_convert_cond (tree); static tree finalize_nrv_r (tree *, int *, void *); static tree capture_decltype (tree); /* Used for OpenMP non-static data member privatization. 
*/
static hash_map<tree, tree> *omp_private_member_map;
static vec<tree> omp_private_member_vec;
static bool omp_private_member_ignore_next;


/* Deferred Access Checking Overview
   ---------------------------------

   Most C++ expressions and declarations require access checking
   to be performed during parsing.  However, in several cases,
   this has to be treated differently.

   For member declarations, access checking has to be deferred
   until more information about the declaration is known.  For
   example:

     class A {
	 typedef int X;
       public:
	 X f();
     };

     A::X A::f();
     A::X g();

   When we are parsing the function return type `A::X', we don't
   really know if this is allowed until we parse the function name.

   Furthermore, some contexts require that access checking is
   never performed at all.  These include class heads, and template
   instantiations.

   Typical use of access checking functions is described here:

   1. When we enter a context that requires certain access checking
      mode, the function `push_deferring_access_checks' is called with
      DEFERRING argument specifying the desired mode.  Access checking
      may be performed immediately (dk_no_deferred), deferred
      (dk_deferred), or not performed (dk_no_check).

   2. When a declaration such as a type, or a variable, is encountered,
      the function `perform_or_defer_access_check' is called.  It
      maintains a vector of all deferred checks.

   3. The global `current_class_type' or `current_function_decl' is then
      setup by the parser.  `enforce_access' relies on these information
      to check access.

   4. Upon exiting the context mentioned in step 1,
      `perform_deferred_access_checks' is called to check all declaration
      stored in the vector. `pop_deferring_access_checks' is then
      called to restore the previous access checking mode.

      In case of parsing error, we simply call `pop_deferring_access_checks'
      without `perform_deferred_access_checks'.  */

/* One entry on the deferred-access stack; see deferred_access_stack
   below.  */
struct GTY(()) deferred_access {
  /* A vector representing name-lookups for which we have deferred
     checking access controls.  We cannot check the accessibility of
     names used in a decl-specifier-seq until we know what is being
     declared because code like:

       class A {
	 class B {};
	 B* f();
       }

       A::B* A::f() { return 0; }

     is valid, even though `A::B' is not generally accessible.  */
  vec<deferred_access_check, va_gc> *deferred_access_checks;

  /* The current mode of access checks.  */
  enum deferring_kind deferring_access_checks_kind;
};

/* Data for deferred access checking.  */
static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack;
/* Depth of nested dk_no_check contexts; while nonzero, no entries are
   pushed and all checking is suppressed.  */
static GTY(()) unsigned deferred_access_no_check;

/* Save the current deferred access states and start deferred
   access checking iff DEFER_P is true.  */

void
push_deferring_access_checks (deferring_kind deferring)
{
  /* For context like template instantiation, access checking
     disabling applies to all nested context.  */
  if (deferred_access_no_check || deferring == dk_no_check)
    deferred_access_no_check++;
  else
    {
      deferred_access e = {NULL, deferring};
      vec_safe_push (deferred_access_stack, e);
    }
}

/* Save the current deferred access states and start deferred access
   checking, continuing the set of deferred checks in CHECKS.  */

void
reopen_deferring_access_checks (vec<deferred_access_check, va_gc> * checks)
{
  push_deferring_access_checks (dk_deferred);
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferred_access_checks = checks;
}

/* Resume deferring access checks again after we stopped doing
   this previously.  */

void
resume_deferring_access_checks (void)
{
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_deferred;
}

/* Stop deferring access checks.  */

void
stop_deferring_access_checks (void)
{
  if (!deferred_access_no_check)
    deferred_access_stack->last().deferring_access_checks_kind = dk_no_deferred;
}

/* Discard the current deferred access checks and restore the
   previous states.
*/

void
pop_deferring_access_checks (void)
{
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    deferred_access_stack->pop ();
}

/* Returns a TREE_LIST representing the deferred checks.
   The TREE_PURPOSE of each node is the type through which the
   access occurred; the TREE_VALUE is the declaration named.
   */

vec<deferred_access_check, va_gc> *
get_deferred_access_checks (void)
{
  if (deferred_access_no_check)
    return NULL;
  else
    return (deferred_access_stack->last().deferred_access_checks);
}

/* Take current deferred checks and combine with the
   previous states if we also defer checks previously.
   Otherwise perform checks now.  */

void
pop_to_parent_deferring_access_checks (void)
{
  if (deferred_access_no_check)
    deferred_access_no_check--;
  else
    {
      vec<deferred_access_check, va_gc> *checks;
      deferred_access *ptr;

      checks = (deferred_access_stack->last ().deferred_access_checks);

      deferred_access_stack->pop ();
      ptr = &deferred_access_stack->last ();
      if (ptr->deferring_access_checks_kind == dk_no_deferred)
	{
	  /* Check access.  */
	  perform_access_checks (checks, tf_warning_or_error);
	}
      else
	{
	  /* Merge with parent.  */
	  int i, j;
	  deferred_access_check *chk, *probe;

	  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
	    {
	      /* Skip checks the parent already has queued (same binfo,
		 decl, and diag_decl).  */
	      FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe)
		{
		  if (probe->binfo == chk->binfo
		      && probe->decl == chk->decl
		      && probe->diag_decl == chk->diag_decl)
		    goto found;
		}
	      /* Insert into parent's checks.  */
	      vec_safe_push (ptr->deferred_access_checks, *chk);
	    found:;
	    }
	}
    }
}

/* If the current scope isn't allowed to access DECL along
   BASETYPE_PATH, give an error, or if we're parsing a function or class
   template, defer the access check to be performed at instantiation time.
   The most derived class in BASETYPE_PATH is the one used to qualify DECL.
   DIAG_DECL is the declaration to use in the error diagnostic.
*/

static bool
enforce_access (tree basetype_path, tree decl, tree diag_decl,
		tsubst_flags_t complain, access_failure_info *afi = NULL)
{
  gcc_assert (TREE_CODE (basetype_path) == TREE_BINFO);

  if (flag_new_inheriting_ctors
      && DECL_INHERITED_CTOR (decl))
    {
      /* 7.3.3/18: The additional constructors are accessible if they would be
	 accessible when used to construct an object of the corresponding base
	 class.  */
      decl = strip_inheriting_ctors (decl);
      basetype_path = lookup_base (basetype_path, DECL_CONTEXT (decl),
				   ba_any, NULL, complain);
    }

  tree cs = current_scope ();
  if (processing_template_decl
      && (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL))
    if (tree template_info = get_template_info (cs))
      {
	/* When parsing a function or class template, we in general need to
	   defer access checks until template instantiation time, since a friend
	   declaration may grant access only to a particular specialization of
	   the template.  */

	if (accessible_p (basetype_path, decl, /*consider_local_p=*/true))
	  /* But if the member is deemed accessible at parse time, then we can
	     assume it'll be accessible at instantiation time.  */
	  return true;

	/* Access of a dependent decl should be rechecked after tsubst'ing
	   into the user of the decl, rather than explicitly deferring the
	   check here.  */
	gcc_assert (!uses_template_parms (decl));
	if (TREE_CODE (decl) == FIELD_DECL)
	  gcc_assert (!uses_template_parms (DECL_CONTEXT (decl)));

	/* Defer this access check until instantiation time.  The check is
	   recorded on the template itself (TI_DEFERRED_ACCESS_CHECKS), not
	   on the deferred-access stack.  */
	deferred_access_check access_check;
	access_check.binfo = basetype_path;
	access_check.decl = decl;
	access_check.diag_decl = diag_decl;
	access_check.loc = input_location;
	vec_safe_push (TI_DEFERRED_ACCESS_CHECKS (template_info), access_check);
	return true;
      }

  if (!accessible_p (basetype_path, decl, /*consider_local_p=*/true))
    {
      if (flag_new_inheriting_ctors)
	diag_decl = strip_inheriting_ctors (diag_decl);
      if (complain & tf_error)
	complain_about_access (decl, diag_decl, true);
      if (afi)
	afi->record_access_failure (basetype_path, decl, diag_decl);
      return false;
    }

  return true;
}

/* Perform the access checks in CHECKS.  The TREE_PURPOSE of each node
   is the BINFO indicating the qualifying scope used to access the
   DECL node stored in the TREE_VALUE of the node.  If CHECKS is empty
   or we aren't in SFINAE context or all the checks succeed return TRUE,
   otherwise FALSE.  */

bool
perform_access_checks (vec<deferred_access_check, va_gc> *checks,
		       tsubst_flags_t complain)
{
  int i;
  deferred_access_check *chk;
  location_t loc = input_location;
  bool ok = true;

  if (!checks)
    return true;

  FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
    {
      /* Temporarily restore the location the check was recorded at so
	 diagnostics point at the original use.  */
      input_location = chk->loc;
      ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain);
    }

  input_location = loc;
  return (complain & tf_error) ? true : ok;
}

/* Perform the deferred access checks.

   After performing the checks, we still have to keep the list
   `deferred_access_stack->deferred_access_checks' since we may want
   to check access for them again later in a different context.
   For example:

     class A {
       typedef int X;
       static X a;
     };
     A::X A::a, x;	// No error for `A::a', error for `x'

   We have to perform deferred access of `A::X', first with `A::a',
   next with `x'.  Return value like perform_access_checks above.  */

bool
perform_deferred_access_checks (tsubst_flags_t complain)
{
  return perform_access_checks (get_deferred_access_checks (), complain);
}

/* Defer checking the accessibility of DECL, when looked up in BINFO.
   DIAG_DECL is the declaration to use to print diagnostics.  Return
   value like perform_access_checks above.
   If non-NULL, report failures to AFI.  */

bool
perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl,
			       tsubst_flags_t complain,
			       access_failure_info *afi)
{
  int i;
  deferred_access *ptr;
  deferred_access_check *chk;

  /* Exit if we are in a context that no access checking is performed.  */
  if (deferred_access_no_check)
    return true;

  gcc_assert (TREE_CODE (binfo) == TREE_BINFO);

  ptr = &deferred_access_stack->last ();

  /* If we are not supposed to defer access checks, just check now.  */
  if (ptr->deferring_access_checks_kind == dk_no_deferred)
    {
      bool ok = enforce_access (binfo, decl, diag_decl, complain, afi);
      return (complain & tf_error) ? true : ok;
    }

  /* See if we are already going to perform this check.  */
  FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk)
    {
      if (chk->decl == decl && chk->binfo == binfo
	  && chk->diag_decl == diag_decl)
	{
	  return true;
	}
    }
  /* If not, record the check.  */
  deferred_access_check new_access = {binfo, decl, diag_decl, input_location};
  vec_safe_push (ptr->deferred_access_checks, new_access);

  return true;
}

/* Returns nonzero if the current statement is a full expression,
   i.e. temporaries created during that statement should be destroyed
   at the end of the statement.  */

int
stmts_are_full_exprs_p (void)
{
  return current_stmt_tree ()->stmts_are_full_exprs_p;
}

/* T is a statement.  Add it to the statement-tree.  This is the C++
   version.  The C/ObjC frontends have a slightly different version of
   this function.  */

tree
add_stmt (tree t)
{
  enum tree_code code = TREE_CODE (t);

  if (EXPR_P (t) && code != LABEL_EXPR)
    {
      if (!EXPR_HAS_LOCATION (t))
	SET_EXPR_LOCATION (t, input_location);

      /* When we expand a statement-tree, we must know whether or not the
	 statements are full-expressions.  We record that fact here.  */
      if (STATEMENT_CODE_P (TREE_CODE (t)))
	STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p ();
    }

  if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
    STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;

  /* Add T to the statement-tree.  Non-side-effect statements need to be
     recorded during statement expressions.  */
  gcc_checking_assert (!stmt_list_stack->is_empty ());
  append_to_statement_list_force (t, &cur_stmt_list);

  return t;
}

/* Returns the stmt_tree to which statements are currently being added.  */

stmt_tree
current_stmt_tree (void)
{
  return (cfun
	  ? &cfun->language->base.x_stmt_tree
	  : &scope_chain->x_stmt_tree);
}

/* If statements are full expressions, wrap STMT in a CLEANUP_POINT_EXPR.  */

static tree
maybe_cleanup_point_expr (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr);
  return expr;
}

/* Like maybe_cleanup_point_expr except have the type of the new expression be
   void so we don't need to create a temporary variable to hold the inner
   expression.  The reason why we do this is because the original type might be
   an aggregate and we cannot create a temporary variable for that type.  */

tree
maybe_cleanup_point_expr_void (tree expr)
{
  if (!processing_template_decl && stmts_are_full_exprs_p ())
    expr = fold_build_cleanup_point_expr (void_type_node, expr);
  return expr;
}



/* Create a declaration statement for the declaration given by the DECL.  */

void
add_decl_expr (tree decl)
{
  tree r = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
  if (DECL_INITIAL (decl)
      || (DECL_SIZE (decl) && TREE_SIDE_EFFECTS (DECL_SIZE (decl))))
    r = maybe_cleanup_point_expr_void (r);
  add_stmt (r);
}

/* Finish a scope.  */

tree
do_poplevel (tree stmt_list)
{
  tree block = NULL;

  if (stmts_are_full_exprs_p ())
    block = poplevel (kept_level_p (), 1, 0);

  stmt_list = pop_stmt_list (stmt_list);

  if (!processing_template_decl)
    {
      stmt_list = c_build_bind_expr (input_location, block, stmt_list);
      /* ??? See c_end_compound_stmt re statement expressions.  */
    }

  return stmt_list;
}

/* Begin a new scope.  */

static tree
do_pushlevel (scope_kind sk)
{
  tree ret = push_stmt_list ();
  if (stmts_are_full_exprs_p ())
    begin_scope (sk, NULL);
  return ret;
}

/* Queue a cleanup.  CLEANUP is an expression/statement to be executed
   when the current scope is exited.  EH_ONLY is true when this is not
   meant to apply to normal control flow transfer.  */

void
push_cleanup (tree decl, tree cleanup, bool eh_only)
{
  tree stmt = build_stmt (input_location, CLEANUP_STMT, NULL, cleanup, decl);
  CLEANUP_EH_ONLY (stmt) = eh_only;
  add_stmt (stmt);
  /* The cleanup's body is everything added to the statement list from
     here until the list is popped.  */
  CLEANUP_BODY (stmt) = push_stmt_list ();
}

/* Simple infinite loop tracking for -Wreturn-type.  We keep a stack of all
   the current loops, represented by 'NULL_TREE' if we've seen a possible
   exit, and 'error_mark_node' if not.  This is currently used only to
   suppress the warning about a function with no return statements, and
   therefore we don't bother noting returns as possible exits.  We also
   don't bother with gotos.  */

static void
begin_maybe_infinite_loop (tree cond)
{
  /* Only track this while parsing a function, not during instantiation.  */
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  bool maybe_infinite = true;
  if (cond)
    {
      cond = fold_non_dependent_expr (cond);
      maybe_infinite = integer_nonzerop (cond);
    }
  vec_safe_push (cp_function_chain->infinite_loops,
		 maybe_infinite ? error_mark_node : NULL_TREE);

}

/* A break is a possible exit for the current loop.  */

void
break_maybe_infinite_loop (void)
{
  if (!cfun)
    return;
  cp_function_chain->infinite_loops->last() = NULL_TREE;
}

/* If we reach the end of the loop without seeing a possible exit, we have
   an infinite loop.
*/

static void
end_maybe_infinite_loop (tree cond)
{
  if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl)
		&& !processing_template_decl))
    return;
  tree current = cp_function_chain->infinite_loops->pop();
  if (current != NULL_TREE)
    {
      /* No break was seen; the loop is infinite iff its condition is
	 a nonzero constant.  */
      cond = fold_non_dependent_expr (cond);
      if (integer_nonzerop (cond))
	current_function_infinite_loop = 1;
    }
}


/* Begin a conditional that might contain a declaration.  When generating
   normal code, we want the declaration to appear before the statement
   containing the conditional.  When generating template code, we want the
   conditional to be rendered as the raw DECL_EXPR.  */

static void
begin_cond (tree *cond_p)
{
  if (processing_template_decl)
    *cond_p = push_stmt_list ();
}

/* Finish such a conditional.  */

static void
finish_cond (tree *cond_p, tree expr)
{
  if (processing_template_decl)
    {
      tree cond = pop_stmt_list (*cond_p);

      if (expr == NULL_TREE)
	/* Empty condition in 'for'.  */
	gcc_assert (empty_expr_stmt_p (cond));
      else if (check_for_bare_parameter_packs (expr))
	expr = error_mark_node;
      else if (!empty_expr_stmt_p (cond))
	expr = build2 (COMPOUND_EXPR, TREE_TYPE (expr), cond, expr);
    }
  *cond_p = expr;
}

/* If *COND_P specifies a conditional with a declaration, transform the
   loop such that
	    while (A x = 42) { }
	    for (; A x = 42;) { }
   becomes
	    while (true) { A x = 42; if (!x) break; }
	    for (;;) { A x = 42; if (!x) break; }
   The statement list for BODY will be empty if the conditional did
   not declare anything.  */

static void
simplify_loop_decl_cond (tree *cond_p, tree body)
{
  tree cond, if_stmt;

  /* An empty BODY statement list means the condition declared nothing,
     so there is nothing to transform.  */
  if (!TREE_SIDE_EFFECTS (body))
    return;

  cond = *cond_p;
  *cond_p = boolean_true_node;

  if_stmt = begin_if_stmt ();
  cond = cp_build_unary_op (TRUTH_NOT_EXPR, cond, false, tf_warning_or_error);
  finish_if_stmt_cond (cond, if_stmt);
  finish_break_stmt ();
  finish_then_clause (if_stmt);
  finish_if_stmt (if_stmt);
}

/* Finish a goto-statement.
*/

tree
finish_goto_stmt (tree destination)
{
  if (identifier_p (destination))
    destination = lookup_label (destination);

  /* We warn about unused labels with -Wunused.  That means we have to
     mark the used labels as used.  */
  if (TREE_CODE (destination) == LABEL_DECL)
    TREE_USED (destination) = 1;
  else
    {
      /* A computed goto: the destination is an expression, not a label.  */
      destination = mark_rvalue_use (destination);
      if (!processing_template_decl)
	{
	  destination = cp_convert (ptr_type_node, destination,
				    tf_warning_or_error);
	  if (error_operand_p (destination))
	    return NULL_TREE;
	  destination
	    = fold_build_cleanup_point_expr (TREE_TYPE (destination),
					     destination);
	}
    }

  check_goto (destination);

  add_stmt (build_predict_expr (PRED_GOTO, NOT_TAKEN));
  return add_stmt (build_stmt (input_location, GOTO_EXPR, destination));
}

/* COND is the condition-expression for an if, while, etc.,
   statement.  Convert it to a boolean value, if appropriate.
   In addition, verify sequence points if -Wsequence-point is enabled.  */

static tree
maybe_convert_cond (tree cond)
{
  /* Empty conditions remain empty.  */
  if (!cond)
    return NULL_TREE;

  /* Wait until we instantiate templates before doing conversion.  */
  if (type_dependent_expression_p (cond))
    return cond;

  if (warn_sequence_point && !processing_template_decl)
    verify_sequence_points (cond);

  /* Do the conversion.  */
  cond = convert_from_reference (cond);

  if (TREE_CODE (cond) == MODIFY_EXPR
      && !TREE_NO_WARNING (cond)
      && warn_parentheses
      && warning_at (cp_expr_loc_or_input_loc (cond),
		     OPT_Wparentheses, "suggest parentheses around "
				       "assignment used as truth value"))
    TREE_NO_WARNING (cond) = 1;

  return condition_conversion (cond);
}

/* Finish an expression-statement, whose EXPRESSION is as indicated.  */

tree
finish_expr_stmt (tree expr)
{
  tree r = NULL_TREE;
  location_t loc = EXPR_LOCATION (expr);

  if (expr != NULL_TREE)
    {
      /* If we ran into a problem, make sure we complained.  */
      gcc_assert (expr != error_mark_node || seen_error ());

      if (!processing_template_decl)
	{
	  if (warn_sequence_point)
	    verify_sequence_points (expr);
	  expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error);
	}
      else if (!type_dependent_expression_p (expr))
	convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT,
			 tf_warning_or_error);

      if (check_for_bare_parameter_packs (expr))
	expr = error_mark_node;

      /* Simplification of inner statement expressions, compound exprs,
	 etc can result in us already having an EXPR_STMT.  */
      if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
	{
	  if (TREE_CODE (expr) != EXPR_STMT)
	    expr = build_stmt (loc, EXPR_STMT, expr);
	  expr = maybe_cleanup_point_expr_void (expr);
	}

      r = add_stmt (expr);
    }

  return r;
}


/* Begin an if-statement.  Returns a newly created IF_STMT if
   appropriate.  */

tree
begin_if_stmt (void)
{
  tree r, scope;
  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, IF_STMT, NULL_TREE,
		  NULL_TREE, NULL_TREE, scope);
  current_binding_level->this_entity = r;
  begin_cond (&IF_COND (r));
  return r;
}

/* Returns true if FN, a CALL_EXPR, is a call to
   std::is_constant_evaluated or __builtin_is_constant_evaluated.  */

static bool
is_std_constant_evaluated_p (tree fn)
{
  /* std::is_constant_evaluated takes no arguments.  */
  if (call_expr_nargs (fn) != 0)
    return false;

  tree fndecl = cp_get_callee_fndecl_nofold (fn);
  if (fndecl == NULL_TREE)
    return false;

  if (fndecl_built_in_p (fndecl, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
			 BUILT_IN_FRONTEND))
    return true;

  if (!decl_in_std_namespace_p (fndecl))
    return false;

  tree name = DECL_NAME (fndecl);
  return name && id_equal (name, "is_constant_evaluated");
}

/* Process the COND of an if-statement, which may be given by
   IF_STMT.
*/

tree
finish_if_stmt_cond (tree cond, tree if_stmt)
{
  cond = maybe_convert_cond (cond);
  if (IF_STMT_CONSTEXPR_P (if_stmt)
      && !type_dependent_expression_p (cond)
      && require_constant_expression (cond)
      && !instantiation_dependent_expression_p (cond)
      /* Wait until instantiation time, since only then COND has been
	 converted to bool.  */
      && TYPE_MAIN_VARIANT (TREE_TYPE (cond)) == boolean_type_node)
    {
      /* if constexpr (std::is_constant_evaluated()) is always true,
	 so give the user a clue.  */
      if (warn_tautological_compare)
	{
	  tree t = cond;
	  if (TREE_CODE (t) == CLEANUP_POINT_EXPR)
	    t = TREE_OPERAND (t, 0);
	  if (TREE_CODE (t) == CALL_EXPR
	      && is_std_constant_evaluated_p (t))
	    warning_at (EXPR_LOCATION (cond), OPT_Wtautological_compare,
			"%qs always evaluates to true in %<if constexpr%>",
			"std::is_constant_evaluated");
	}

      cond = instantiate_non_dependent_expr (cond);
      cond = cxx_constant_value (cond, NULL_TREE);
    }
  finish_cond (&IF_COND (if_stmt), cond);
  add_stmt (if_stmt);
  THEN_CLAUSE (if_stmt) = push_stmt_list ();
  return cond;
}

/* Finish the then-clause of an if-statement, which may be given by
   IF_STMT.  */

tree
finish_then_clause (tree if_stmt)
{
  THEN_CLAUSE (if_stmt) = pop_stmt_list (THEN_CLAUSE (if_stmt));
  return if_stmt;
}

/* Begin the else-clause of an if-statement.  */

void
begin_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = push_stmt_list ();
}

/* Finish the else-clause of an if-statement, which may be given by
   IF_STMT.  */

void
finish_else_clause (tree if_stmt)
{
  ELSE_CLAUSE (if_stmt) = pop_stmt_list (ELSE_CLAUSE (if_stmt));
}

/* Callback for cp_walk_tree to mark all {VAR,PARM}_DECLs in a tree as
   read.  */

static tree
maybe_mark_exp_read_r (tree *tp, int *, void *)
{
  tree t = *tp;
  if (VAR_P (t) || TREE_CODE (t) == PARM_DECL)
    mark_exp_read (t);
  return NULL_TREE;
}

/* Finish an if-statement.  */

void
finish_if_stmt (tree if_stmt)
{
  tree scope = IF_SCOPE (if_stmt);
  IF_SCOPE (if_stmt) = NULL;
  if (IF_STMT_CONSTEXPR_P (if_stmt))
    {
      /* Prevent various -Wunused warnings.  We might not instantiate
	 either of these branches, so we would not mark the variables
	 used in that branch as read.  */
      cp_walk_tree_without_duplicates (&THEN_CLAUSE (if_stmt),
				       maybe_mark_exp_read_r, NULL);
      cp_walk_tree_without_duplicates (&ELSE_CLAUSE (if_stmt),
				       maybe_mark_exp_read_r, NULL);
    }
  add_stmt (do_poplevel (scope));
}

/* Begin a while-statement.  Returns a newly created WHILE_STMT if
   appropriate.  */

tree
begin_while_stmt (void)
{
  tree r;
  r = build_stmt (input_location, WHILE_STMT, NULL_TREE, NULL_TREE);
  add_stmt (r);
  WHILE_BODY (r) = do_pushlevel (sk_block);
  begin_cond (&WHILE_COND (r));
  return r;
}

/* Process the COND of a while-statement, which may be given by
   WHILE_STMT.  */

void
finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep,
			unsigned short unroll)
{
  cond = maybe_convert_cond (cond);
  finish_cond (&WHILE_COND (while_stmt), cond);
  begin_maybe_infinite_loop (cond);
  /* ivdep/unroll pragmas are recorded as ANNOTATE_EXPRs wrapped around
     the condition.  */
  if (ivdep && cond != error_mark_node)
    WHILE_COND (while_stmt) = build3 (ANNOTATE_EXPR,
				      TREE_TYPE (WHILE_COND (while_stmt)),
				      WHILE_COND (while_stmt),
				      build_int_cst (integer_type_node,
						     annot_expr_ivdep_kind),
				      integer_zero_node);
  if (unroll && cond != error_mark_node)
    WHILE_COND (while_stmt) = build3 (ANNOTATE_EXPR,
				      TREE_TYPE (WHILE_COND (while_stmt)),
				      WHILE_COND (while_stmt),
				      build_int_cst (integer_type_node,
						     annot_expr_unroll_kind),
				      build_int_cst (integer_type_node,
						     unroll));
  simplify_loop_decl_cond (&WHILE_COND (while_stmt), WHILE_BODY (while_stmt));
}

/* Finish a while-statement, which may be given by WHILE_STMT.  */

void
finish_while_stmt (tree while_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);
  WHILE_BODY (while_stmt) = do_poplevel (WHILE_BODY (while_stmt));
}

/* Begin a do-statement.  Returns a newly created DO_STMT if
   appropriate.
*/

tree
begin_do_stmt (void)
{
  tree r = build_stmt (input_location, DO_STMT, NULL_TREE, NULL_TREE);
  begin_maybe_infinite_loop (boolean_true_node);
  add_stmt (r);
  DO_BODY (r) = push_stmt_list ();
  return r;
}

/* Finish the body of a do-statement, which may be given by DO_STMT.  */

void
finish_do_body (tree do_stmt)
{
  tree body = DO_BODY (do_stmt) = pop_stmt_list (DO_BODY (do_stmt));

  if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_TAIL (body))
    body = STATEMENT_LIST_TAIL (body)->stmt;

  if (IS_EMPTY_STMT (body))
    warning (OPT_Wempty_body,
	     "suggest explicit braces around empty body in %<do%> statement");
}

/* Finish a do-statement, which may be given by DO_STMT, and whose
   COND is as indicated.  */

void
finish_do_stmt (tree cond, tree do_stmt, bool ivdep, unsigned short unroll)
{
  cond = maybe_convert_cond (cond);
  end_maybe_infinite_loop (cond);
  /* ivdep/unroll pragmas are recorded as ANNOTATE_EXPRs wrapped around
     the condition.  */
  if (ivdep && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node, annot_expr_ivdep_kind),
		   integer_zero_node);
  if (unroll && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node, annot_expr_unroll_kind),
		   build_int_cst (integer_type_node, unroll));
  DO_COND (do_stmt) = cond;
}

/* Finish a return-statement.  The EXPRESSION returned, if any, is as
   indicated.  */

tree
finish_return_stmt (tree expr)
{
  tree r;
  bool no_warning;

  expr = check_return_expr (expr, &no_warning);

  if (error_operand_p (expr)
      || (flag_openmp && !check_omp_return ()))
    {
      /* Suppress -Wreturn-type for this function.  */
      if (warn_return_type)
	TREE_NO_WARNING (current_function_decl) = true;
      return error_mark_node;
    }

  if (!processing_template_decl)
    {
      if (warn_sequence_point)
	verify_sequence_points (expr);

      if (DECL_DESTRUCTOR_P (current_function_decl)
	  || (DECL_CONSTRUCTOR_P (current_function_decl)
	      && targetm.cxx.cdtor_returns_this ()))
	{
	  /* Similarly, all destructors must run destructors for
	     base-classes before returning.  So, all returns in a
	     destructor get sent to the DTOR_LABEL; finish_function emits
	     code to return a value there.  */
	  return finish_goto_stmt (cdtor_label);
	}
    }

  r = build_stmt (input_location, RETURN_EXPR, expr);
  TREE_NO_WARNING (r) |= no_warning;
  r = maybe_cleanup_point_expr_void (r);
  r = add_stmt (r);

  return r;
}

/* Begin the scope of a for-statement or a range-for-statement.
   Both the returned trees are to be used in a call to
   begin_for_stmt or begin_range_for_stmt.  */

tree
begin_for_scope (tree *init)
{
  tree scope = do_pushlevel (sk_for);

  if (processing_template_decl)
    *init = push_stmt_list ();
  else
    *init = NULL_TREE;

  return scope;
}

/* Begin a for-statement.  Returns a new FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE  */

tree
begin_for_stmt (tree scope, tree init)
{
  tree r;

  r = build_stmt (input_location, FOR_STMT, NULL_TREE, NULL_TREE,
		  NULL_TREE, NULL_TREE, NULL_TREE);

  if (scope == NULL_TREE)
    {
      gcc_assert (!init);
      scope = begin_for_scope (&init);
    }

  FOR_INIT_STMT (r) = init;
  FOR_SCOPE (r) = scope;

  return r;
}

/* Finish the init-statement of a for-statement, which may be
   given by FOR_STMT.  */

void
finish_init_stmt (tree for_stmt)
{
  if (processing_template_decl)
    FOR_INIT_STMT (for_stmt) = pop_stmt_list (FOR_INIT_STMT (for_stmt));
  add_stmt (for_stmt);
  FOR_BODY (for_stmt) = do_pushlevel (sk_block);
  begin_cond (&FOR_COND (for_stmt));
}

/* Finish the COND of a for-statement, which may be given by
   FOR_STMT.
*/

void
finish_for_cond (tree cond, tree for_stmt, bool ivdep, unsigned short unroll)
{
  cond = maybe_convert_cond (cond);
  finish_cond (&FOR_COND (for_stmt), cond);
  begin_maybe_infinite_loop (cond);
  /* As in finish_do_stmt, attach ivdep/unroll loop hints by wrapping
     the stored condition in ANNOTATE_EXPRs.  */
  if (ivdep && cond != error_mark_node)
    FOR_COND (for_stmt) = build3 (ANNOTATE_EXPR,
                                  TREE_TYPE (FOR_COND (for_stmt)),
                                  FOR_COND (for_stmt),
                                  build_int_cst (integer_type_node,
                                                 annot_expr_ivdep_kind),
                                  integer_zero_node);
  if (unroll && cond != error_mark_node)
    FOR_COND (for_stmt) = build3 (ANNOTATE_EXPR,
                                  TREE_TYPE (FOR_COND (for_stmt)),
                                  FOR_COND (for_stmt),
                                  build_int_cst (integer_type_node,
                                                 annot_expr_unroll_kind),
                                  build_int_cst (integer_type_node,
                                                 unroll));
  simplify_loop_decl_cond (&FOR_COND (for_stmt), FOR_BODY (for_stmt));
}

/* Finish the increment-EXPRESSION in a for-statement, which may be
   given by FOR_STMT.  */

void
finish_for_expr (tree expr, tree for_stmt)
{
  if (!expr)
    return;
  /* If EXPR is an overloaded function, issue an error; there is no
     context available to use to perform overload resolution.  */
  if (type_unknown_p (expr))
    {
      cxx_incomplete_type_error (expr, TREE_TYPE (expr));
      expr = error_mark_node;
    }
  if (!processing_template_decl)
    {
      if (warn_sequence_point)
        verify_sequence_points (expr);
      expr = convert_to_void (expr, ICV_THIRD_IN_FOR,
                              tf_warning_or_error);
    }
  else if (!type_dependent_expression_p (expr))
    convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR,
                     tf_warning_or_error);
  expr = maybe_cleanup_point_expr_void (expr);
  if (check_for_bare_parameter_packs (expr))
    expr = error_mark_node;
  FOR_EXPR (for_stmt) = expr;
}

/* Finish the body of a for-statement, which may be given by
   FOR_STMT.  The increment-EXPR for the loop must be
   provided.
   It can also finish RANGE_FOR_STMT.
*/

void
finish_for_stmt (tree for_stmt)
{
  end_maybe_infinite_loop (boolean_true_node);

  if (TREE_CODE (for_stmt) == RANGE_FOR_STMT)
    RANGE_FOR_BODY (for_stmt) = do_poplevel (RANGE_FOR_BODY (for_stmt));
  else
    FOR_BODY (for_stmt) = do_poplevel (FOR_BODY (for_stmt));

  /* Pop the scope for the body of the loop.  */
  tree *scope_ptr = (TREE_CODE (for_stmt) == RANGE_FOR_STMT
                     ? &RANGE_FOR_SCOPE (for_stmt)
                     : &FOR_SCOPE (for_stmt));
  /* Detach the scope from the statement before popping it.  */
  tree scope = *scope_ptr;
  *scope_ptr = NULL;

  /* During parsing of the body, range for uses "__for_{range,begin,end} "
     decl names to make those unaccessible by code in the body.
     Change it to ones with underscore instead of space, so that it can
     be inspected in the debugger.  */
  tree range_for_decl[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
  /* The renaming below relies on this fixed layout of the
     cp_global_trees identifier slots.  */
  gcc_assert (CPTI_FOR_BEGIN__IDENTIFIER == CPTI_FOR_RANGE__IDENTIFIER + 1
              && CPTI_FOR_END__IDENTIFIER == CPTI_FOR_RANGE__IDENTIFIER + 2
              && CPTI_FOR_RANGE_IDENTIFIER == CPTI_FOR_RANGE__IDENTIFIER + 3
              && CPTI_FOR_BEGIN_IDENTIFIER == CPTI_FOR_BEGIN__IDENTIFIER + 3
              && CPTI_FOR_END_IDENTIFIER == CPTI_FOR_END__IDENTIFIER + 3);
  for (int i = 0; i < 3; i++)
    {
      tree id = cp_global_trees[CPTI_FOR_RANGE__IDENTIFIER + i];
      if (IDENTIFIER_BINDING (id)
          && IDENTIFIER_BINDING (id)->scope == current_binding_level)
        {
          range_for_decl[i] = IDENTIFIER_BINDING (id)->value;
          gcc_assert (VAR_P (range_for_decl[i])
                      && DECL_ARTIFICIAL (range_for_decl[i]));
        }
    }

  add_stmt (do_poplevel (scope));

  /* Rename after popping, so the bindings were looked up while the
     decls were still in scope.  */
  for (int i = 0; i < 3; i++)
    if (range_for_decl[i])
      DECL_NAME (range_for_decl[i])
        = cp_global_trees[CPTI_FOR_RANGE_IDENTIFIER + i];
}

/* Begin a range-for-statement.  Returns a new RANGE_FOR_STMT.
   SCOPE and INIT should be the return of begin_for_scope,
   or both NULL_TREE .
   To finish it call finish_for_stmt().
*/

tree
begin_range_for_stmt (tree scope, tree init)
{
  /* Unlike do/while, a range-for is never assumed to be an infinite
     loop, hence boolean_false_node here.  */
  begin_maybe_infinite_loop (boolean_false_node);

  tree r = build_stmt (input_location, RANGE_FOR_STMT, NULL_TREE, NULL_TREE,
                       NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE);

  if (scope == NULL_TREE)
    {
      gcc_assert (!init);
      scope = begin_for_scope (&init);
    }

  /* Since C++20, RANGE_FOR_STMTs can use the init tree, so save it.  */
  RANGE_FOR_INIT_STMT (r) = init;
  RANGE_FOR_SCOPE (r) = scope;

  return r;
}

/* Finish the head of a range-based for statement, which may be given
   by RANGE_FOR_STMT.  DECL must be the declaration and EXPR must be
   the loop expression.  */

void
finish_range_for_decl (tree range_for_stmt, tree decl, tree expr)
{
  if (processing_template_decl)
    RANGE_FOR_INIT_STMT (range_for_stmt)
      = pop_stmt_list (RANGE_FOR_INIT_STMT (range_for_stmt));
  RANGE_FOR_DECL (range_for_stmt) = decl;
  RANGE_FOR_EXPR (range_for_stmt) = expr;
  add_stmt (range_for_stmt);
  RANGE_FOR_BODY (range_for_stmt) = do_pushlevel (sk_block);
}

/* Finish a break-statement.  */

tree
finish_break_stmt (void)
{
  /* In switch statements break is sometimes stylistically used after
     a return statement.  This can lead to spurious warnings about
     control reaching the end of a non-void function when it is
     inlined.  Note that we are calling block_may_fallthru with
     language specific tree nodes; this works because
     block_may_fallthru returns true when given something it does not
     understand.  */
  if (!block_may_fallthru (cur_stmt_list))
    return void_node;
  note_break_stmt ();
  return add_stmt (build_stmt (input_location, BREAK_STMT));
}

/* Finish a continue-statement.  */

tree
finish_continue_stmt (void)
{
  return add_stmt (build_stmt (input_location, CONTINUE_STMT));
}

/* Begin a switch-statement.  Returns a new SWITCH_STMT if
   appropriate.
*/

tree
begin_switch_stmt (void)
{
  tree r, scope;

  scope = do_pushlevel (sk_cond);
  r = build_stmt (input_location, SWITCH_STMT, NULL_TREE, NULL_TREE,
                  NULL_TREE, scope);

  begin_cond (&SWITCH_STMT_COND (r));

  return r;
}

/* Finish the cond of a switch-statement.  */

void
finish_switch_cond (tree cond, tree switch_stmt)
{
  tree orig_type = NULL;

  if (!processing_template_decl)
    {
      /* Convert the condition to an integer or enumeration type.  */
      tree orig_cond = cond;
      cond = build_expr_type_conversion (WANT_INT | WANT_ENUM, cond, true);
      if (cond == NULL_TREE)
        {
          error_at (cp_expr_loc_or_input_loc (orig_cond),
                    "switch quantity not an integer");
          cond = error_mark_node;
        }
      /* We want unlowered type here to handle enum bit-fields.  */
      orig_type = unlowered_expr_type (cond);
      if (TREE_CODE (orig_type) != ENUMERAL_TYPE)
        orig_type = TREE_TYPE (cond);
      if (cond != error_mark_node)
        {
          /* [stmt.switch]

             Integral promotions are performed.  */
          cond = perform_integral_promotions (cond);
          cond = maybe_cleanup_point_expr (cond);
        }
    }
  if (check_for_bare_parameter_packs (cond))
    cond = error_mark_node;
  else if (!processing_template_decl && warn_sequence_point)
    verify_sequence_points (cond);

  finish_cond (&SWITCH_STMT_COND (switch_stmt), cond);
  SWITCH_STMT_TYPE (switch_stmt) = orig_type;
  add_stmt (switch_stmt);
  push_switch (switch_stmt);
  /* Case labels and body statements accumulate here until
     finish_switch_stmt pops the list.  */
  SWITCH_STMT_BODY (switch_stmt) = push_stmt_list ();
}

/* Finish the body of a switch-statement, which may be given by
   SWITCH_STMT.  The COND to switch on is indicated.  */

void
finish_switch_stmt (tree switch_stmt)
{
  tree scope;

  /* Pop the body list pushed by finish_switch_cond.  */
  SWITCH_STMT_BODY (switch_stmt)
    = pop_stmt_list (SWITCH_STMT_BODY (switch_stmt));
  pop_switch ();

  scope = SWITCH_STMT_SCOPE (switch_stmt);
  SWITCH_STMT_SCOPE (switch_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}

/* Begin a try-block.  Returns a newly-created TRY_BLOCK if
   appropriate.
*/

tree
begin_try_block (void)
{
  tree r = build_stmt (input_location, TRY_BLOCK, NULL_TREE, NULL_TREE);
  add_stmt (r);
  TRY_STMTS (r) = push_stmt_list ();
  return r;
}

/* Likewise, for a function-try-block.  The block returned in
   *COMPOUND_STMT is an artificial outer scope, containing the
   function-try-block.  */

tree
begin_function_try_block (tree *compound_stmt)
{
  tree r;
  /* This outer scope does not exist in the C++ standard, but we need
     a place to put __FUNCTION__ and similar variables.  */
  *compound_stmt = begin_compound_stmt (0);
  r = begin_try_block ();
  FN_TRY_BLOCK_P (r) = 1;
  return r;
}

/* Finish a try-block, which may be given by TRY_BLOCK.  */

void
finish_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
  /* Handlers accumulate on a fresh statement list until
     finish_handler_sequence pops it.  */
  TRY_HANDLERS (try_block) = push_stmt_list ();
}

/* Finish the body of a cleanup try-block, which may be given by
   TRY_BLOCK.  */

void
finish_cleanup_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
}

/* Finish an implicitly generated try-block, with a cleanup is given
   by CLEANUP.  */

void
finish_cleanup (tree cleanup, tree try_block)
{
  TRY_HANDLERS (try_block) = cleanup;
  CLEANUP_P (try_block) = 1;
}

/* Likewise, for a function-try-block.  */

void
finish_function_try_block (tree try_block)
{
  finish_try_block (try_block);
  /* FIXME : something queer about CTOR_INITIALIZER somehow following
     the try block, but moving it inside.  */
  in_function_try_handler = 1;
}

/* Finish a handler-sequence for a try-block, which may be given by
   TRY_BLOCK.  */

void
finish_handler_sequence (tree try_block)
{
  TRY_HANDLERS (try_block) = pop_stmt_list (TRY_HANDLERS (try_block));
  check_handlers (TRY_HANDLERS (try_block));
}

/* Finish the handler-seq for a function-try-block, given by
   TRY_BLOCK.  COMPOUND_STMT is the outer block created by
   begin_function_try_block.
*/

void
finish_function_handler_sequence (tree try_block, tree compound_stmt)
{
  in_function_try_handler = 0;
  finish_handler_sequence (try_block);
  finish_compound_stmt (compound_stmt);
}

/* Begin a handler.  Returns a HANDLER if appropriate.  */

tree
begin_handler (void)
{
  tree r;

  r = build_stmt (input_location, HANDLER, NULL_TREE, NULL_TREE);
  add_stmt (r);

  /* Create a binding level for the eh_info and the exception object
     cleanup.  */
  HANDLER_BODY (r) = do_pushlevel (sk_catch);

  return r;
}

/* Finish the handler-parameters for a handler, which may be given by
   HANDLER.  DECL is the declaration for the catch parameter, or NULL
   if this is a `catch (...)' clause.  */

void
finish_handler_parms (tree decl, tree handler)
{
  /* TYPE stays NULL_TREE for `catch (...)' in a template.  */
  tree type = NULL_TREE;
  if (processing_template_decl)
    {
      if (decl)
        {
          decl = pushdecl (decl);
          decl = push_template_decl (decl);
          HANDLER_PARMS (handler) = decl;
          type = TREE_TYPE (decl);
        }
    }
  else
    {
      type = expand_start_catch_block (decl);
      if (warn_catch_value
          && type != NULL_TREE
          && type != error_mark_node
          && !TYPE_REF_P (TREE_TYPE (decl)))
        {
          tree orig_type = TREE_TYPE (decl);
          if (CLASS_TYPE_P (orig_type))
            {
              if (TYPE_POLYMORPHIC_P (orig_type))
                warning_at (DECL_SOURCE_LOCATION (decl),
                            OPT_Wcatch_value_,
                            "catching polymorphic type %q#T by value",
                            orig_type);
              else if (warn_catch_value > 1)
                warning_at (DECL_SOURCE_LOCATION (decl),
                            OPT_Wcatch_value_,
                            "catching type %q#T by value", orig_type);
            }
          else if (warn_catch_value > 2)
            warning_at (DECL_SOURCE_LOCATION (decl),
                        OPT_Wcatch_value_,
                        "catching non-reference type %q#T", orig_type);
        }
    }
  HANDLER_TYPE (handler) = type;
}

/* Finish a handler, which may be given by HANDLER.  The BLOCKs are
   the return value from the matching call to finish_handler_parms.  */

void
finish_handler (tree handler)
{
  if (!processing_template_decl)
    expand_end_catch_block ();
  HANDLER_BODY (handler) = do_poplevel (HANDLER_BODY (handler));
}

/* Begin a compound statement.  FLAGS contains some bits that control the
   behavior and context.
If BCS_NO_SCOPE is set, the
   compound statement does not define a scope.  If BCS_FN_BODY is set, this
   is the outermost block of a function.  If BCS_TRY_BLOCK is set, this is
   the block created on behalf of a TRY statement.  Returns a token to be
   passed to finish_compound_stmt.  */

tree
begin_compound_stmt (unsigned int flags)
{
  tree r;

  if (flags & BCS_NO_SCOPE)
    {
      r = push_stmt_list ();
      STATEMENT_LIST_NO_SCOPE (r) = 1;

      /* Normally, we try hard to keep the BLOCK for a statement-expression.
	 But, if it's a statement-expression with a scopeless block, there's
	 nothing to keep, and we don't want to accidentally keep a block
	 *inside* the scopeless block.  */
      keep_next_level (false);
    }
  else
    {
      scope_kind sk = sk_block;
      if (flags & BCS_TRY_BLOCK)
	sk = sk_try;
      else if (flags & BCS_TRANSACTION)
	sk = sk_transaction;
      r = do_pushlevel (sk);
    }

  /* When processing a template, we need to remember where the braces were,
     so that we can set up identical scopes when instantiating the template
     later.  BIND_EXPR is a handy candidate for this.
     Note that do_poplevel won't create a BIND_EXPR itself here (and thus
     result in nested BIND_EXPRs), since we don't build BLOCK nodes when
     processing templates.  */
  if (processing_template_decl)
    {
      r = build3 (BIND_EXPR, NULL, NULL, r, NULL);
      BIND_EXPR_TRY_BLOCK (r) = (flags & BCS_TRY_BLOCK) != 0;
      BIND_EXPR_BODY_BLOCK (r) = (flags & BCS_FN_BODY) != 0;
      TREE_SIDE_EFFECTS (r) = 1;
    }

  return r;
}

/* Finish a compound-statement, which is given by STMT.  */

void
finish_compound_stmt (tree stmt)
{
  if (TREE_CODE (stmt) == BIND_EXPR)
    {
      tree body = do_poplevel (BIND_EXPR_BODY (stmt));
      /* If the STATEMENT_LIST is empty and this BIND_EXPR isn't special,
	 discard the BIND_EXPR so it can be merged with the containing
	 STATEMENT_LIST.  */
      if (TREE_CODE (body) == STATEMENT_LIST
	  && STATEMENT_LIST_HEAD (body) == NULL
	  && !BIND_EXPR_BODY_BLOCK (stmt)
	  && !BIND_EXPR_TRY_BLOCK (stmt))
	stmt = body;
      else
	BIND_EXPR_BODY (stmt) = body;
    }
  else if (STATEMENT_LIST_NO_SCOPE (stmt))
    stmt = pop_stmt_list (stmt);
  else
    {
      /* Destroy any ObjC "super" receivers that may have been created.  */
      objc_clear_super_receiver ();

      stmt = do_poplevel (stmt);
    }

  /* ??? See c_end_compound_stmt wrt statement expressions.  */
  add_stmt (stmt);
}

/* Finish an asm-statement, whose components are a STRING, some
   OUTPUT_OPERANDS, some INPUT_OPERANDS, some CLOBBERS and some
   LABELS.  Also note whether the asm-statement should be
   considered volatile, and whether it is asm inline.  */

tree
finish_asm_stmt (location_t loc, int volatile_p, tree string,
		 tree output_operands, tree input_operands, tree clobbers,
		 tree labels, bool inline_p)
{
  tree r;
  tree t;
  int ninputs = list_length (input_operands);
  int noutputs = list_length (output_operands);

  if (!processing_template_decl)
    {
      const char *constraint;
      const char **oconstraints;
      bool allows_mem, allows_reg, is_inout;
      tree operand;
      int i;

      oconstraints = XALLOCAVEC (const char *, noutputs);

      string = resolve_asm_operand_names (string, output_operands,
					  input_operands, labels);

      for (i = 0, t = output_operands; t; t = TREE_CHAIN (t), ++i)
	{
	  operand = TREE_VALUE (t);

	  /* ??? Really, this should not be here.  Users should be using a
	     proper lvalue, dammit.  But there's a long history of using
	     casts in the output operands.  In cases like longlong.h, this
	     becomes a primitive form of typechecking -- if the cast can be
	     removed, then the output operand had a type of the proper width;
	     otherwise we'll get an error.  Gross, but ...  */
	  STRIP_NOPS (operand);

	  operand = mark_lvalue_use (operand);

	  if (!lvalue_or_else (operand, lv_asm, tf_warning_or_error))
	    operand = error_mark_node;

	  if (operand != error_mark_node
	      && (TREE_READONLY (operand)
		  || CP_TYPE_CONST_P (TREE_TYPE (operand))
		  /* Functions are not modifiable, even though they are
		     lvalues.  */
		  || FUNC_OR_METHOD_TYPE_P (TREE_TYPE (operand))
		  /* If it's an aggregate and any field is const, then it is
		     effectively const.  */
		  || (CLASS_TYPE_P (TREE_TYPE (operand))
		      && C_TYPE_FIELDS_READONLY (TREE_TYPE (operand)))))
	    cxx_readonly_error (loc, operand, lv_asm);

	  tree *op = &operand;
	  while (TREE_CODE (*op) == COMPOUND_EXPR)
	    op = &TREE_OPERAND (*op, 1);
	  switch (TREE_CODE (*op))
	    {
	    case PREINCREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	    case MODIFY_EXPR:
	      *op = genericize_compound_lvalue (*op);
	      op = &TREE_OPERAND (*op, 1);
	      break;
	    default:
	      break;
	    }

	  constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
	  oconstraints[i] = constraint;

	  if (parse_output_constraint (&constraint, i, ninputs, noutputs,
				       &allows_mem, &allows_reg, &is_inout))
	    {
	      /* If the operand is going to end up in memory,
		 mark it addressable.  */
	      if (!allows_reg && !cxx_mark_addressable (*op))
		operand = error_mark_node;
	    }
	  else
	    operand = error_mark_node;

	  TREE_VALUE (t) = operand;
	}

      for (i = 0, t = input_operands; t; ++i, t = TREE_CHAIN (t))
	{
	  constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
	  bool constraint_parsed
	    = parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
				      oconstraints, &allows_mem, &allows_reg);
	  /* If the operand is going to end up in memory, don't call
	     decay_conversion.  */
	  if (constraint_parsed && !allows_reg && allows_mem)
	    operand = mark_lvalue_use (TREE_VALUE (t));
	  else
	    operand = decay_conversion (TREE_VALUE (t), tf_warning_or_error);

	  /* If the type of the operand hasn't been determined (e.g.,
	     because it involves an overloaded function), then issue
	     an error message.  There's no context available to
	     resolve the overloading.  */
	  if (TREE_TYPE (operand) == unknown_type_node)
	    {
	      error_at (loc,
			"type of %<asm%> operand %qE could not be determined",
			TREE_VALUE (t));
	      operand = error_mark_node;
	    }

	  if (constraint_parsed)
	    {
	      /* If the operand is going to end up in memory, mark it
		 addressable.  */
	      if (!allows_reg && allows_mem)
		{
		  /* Strip the nops as we allow this case.  FIXME, this really
		     should be rejected or made deprecated.  */
		  STRIP_NOPS (operand);

		  tree *op = &operand;
		  while (TREE_CODE (*op) == COMPOUND_EXPR)
		    op = &TREE_OPERAND (*op, 1);
		  switch (TREE_CODE (*op))
		    {
		    case PREINCREMENT_EXPR:
		    case PREDECREMENT_EXPR:
		    case MODIFY_EXPR:
		      *op = genericize_compound_lvalue (*op);
		      op = &TREE_OPERAND (*op, 1);
		      break;
		    default:
		      break;
		    }

		  if (!cxx_mark_addressable (*op))
		    operand = error_mark_node;
		}
	      else if (!allows_reg && !allows_mem)
		{
		  /* If constraint allows neither register nor memory,
		     try harder to get a constant.  */
		  tree constop = maybe_constant_value (operand);
		  if (TREE_CONSTANT (constop))
		    operand = constop;
		}
	    }
	  else
	    operand = error_mark_node;

	  TREE_VALUE (t) = operand;
	}
    }

  r = build_stmt (loc, ASM_EXPR, string,
		  output_operands, input_operands,
		  clobbers, labels);
  /* An asm with no outputs is implicitly volatile.  */
  ASM_VOLATILE_P (r) = volatile_p || noutputs == 0;
  ASM_INLINE_P (r) = inline_p;
  r = maybe_cleanup_point_expr_void (r);
  return add_stmt (r);
}

/* Finish a label with the indicated NAME.  Returns the new label.  */

tree
finish_label_stmt (tree name)
{
  tree decl = define_label (input_location, name);

  if (decl == error_mark_node)
    return error_mark_node;

  add_stmt (build_stmt (input_location, LABEL_EXPR, decl));

  return decl;
}

/* Finish a series of declarations for local labels.  G++ allows users
   to declare "local" labels, i.e., labels with scope.  This extension
   is useful when writing code involving statement-expressions.
*/

void
finish_label_decl (tree name)
{
  if (!at_function_scope_p ())
    {
      error ("%<__label__%> declarations are only allowed in function scopes");
      return;
    }

  add_decl_expr (declare_local_label (name));
}

/* When DECL goes out of scope, make sure that CLEANUP is executed.  */

void
finish_decl_cleanup (tree decl, tree cleanup)
{
  push_cleanup (decl, cleanup, false);
}

/* If the current scope exits with an exception, run CLEANUP.  */

void
finish_eh_cleanup (tree cleanup)
{
  /* The final TRUE distinguishes this from finish_decl_cleanup: the
     cleanup applies to the exception path only.  */
  push_cleanup (NULL, cleanup, true);
}

/* The MEM_INITS is a list of mem-initializers, in reverse of the
   order they were written by the user.  Each node is as for
   emit_mem_initializers.  */

void
finish_mem_initializers (tree mem_inits)
{
  /* Reorder the MEM_INITS so that they are in the order they appeared
     in the source program.  */
  mem_inits = nreverse (mem_inits);

  if (processing_template_decl)
    {
      tree mem;

      for (mem = mem_inits; mem; mem = TREE_CHAIN (mem))
        {
          /* If the TREE_PURPOSE is a TYPE_PACK_EXPANSION, skip the
             check for bare parameter packs in the TREE_VALUE, because
             any parameter packs in the TREE_VALUE have already been
             bound as part of the TREE_PURPOSE.  See
             make_pack_expansion for more information.  */
          if (TREE_CODE (TREE_PURPOSE (mem)) != TYPE_PACK_EXPANSION
              && check_for_bare_parameter_packs (TREE_VALUE (mem)))
            TREE_VALUE (mem) = error_mark_node;
        }

      add_stmt (build_min_nt_loc (UNKNOWN_LOCATION,
                                  CTOR_INITIALIZER, mem_inits));
    }
  else
    emit_mem_initializers (mem_inits);
}

/* Obfuscate EXPR if it looks like an id-expression or member access so
   that the call to finish_decltype in do_auto_deduction will give the
   right result.  If EVEN_UNEVAL, do this even in unevaluated context.  */

tree
force_paren_expr (tree expr, bool even_uneval)
{
  /* This is only needed for decltype(auto) in C++14.  */
  if (cxx_dialect < cxx14)
    return expr;

  /* If we're in unevaluated context, we can't be deducing a
     return/initializer type, so we don't need to mess with this.  */
  if (cp_unevaluated_operand && !even_uneval)
    return expr;

  if (!DECL_P (tree_strip_any_location_wrapper (expr))
      && TREE_CODE (expr) != COMPONENT_REF
      && TREE_CODE (expr) != SCOPE_REF)
    return expr;

  location_t loc = cp_expr_location (expr);

  if (TREE_CODE (expr) == COMPONENT_REF
      || TREE_CODE (expr) == SCOPE_REF)
    REF_PARENTHESIZED_P (expr) = true;
  else if (processing_template_decl)
    expr = build1_loc (loc, PAREN_EXPR, TREE_TYPE (expr), expr);
  else
    {
      expr = build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (expr), expr);
      REF_PARENTHESIZED_P (expr) = true;
    }

  return expr;
}

/* If T is an id-expression obfuscated by force_paren_expr, undo the
   obfuscation and return the underlying id-expression.  Otherwise
   return T.  */

tree
maybe_undo_parenthesized_ref (tree t)
{
  if (cxx_dialect < cxx14)
    return t;

  if (INDIRECT_REF_P (t) && REF_PARENTHESIZED_P (t))
    {
      t = TREE_OPERAND (t, 0);
      while (TREE_CODE (t) == NON_LVALUE_EXPR
             || TREE_CODE (t) == NOP_EXPR)
        t = TREE_OPERAND (t, 0);

      gcc_assert (TREE_CODE (t) == ADDR_EXPR
                  || TREE_CODE (t) == STATIC_CAST_EXPR);
      t = TREE_OPERAND (t, 0);
    }
  else if (TREE_CODE (t) == PAREN_EXPR)
    t = TREE_OPERAND (t, 0);
  else if (TREE_CODE (t) == VIEW_CONVERT_EXPR
           && REF_PARENTHESIZED_P (t))
    t = TREE_OPERAND (t, 0);

  return t;
}

/* Finish a parenthesized expression EXPR.  */

cp_expr
finish_parenthesized_expr (cp_expr expr)
{
  if (EXPR_P (expr))
    /* This inhibits warnings in c_common_truthvalue_conversion.  */
    TREE_NO_WARNING (expr) = 1;

  if (TREE_CODE (expr) == OFFSET_REF
      || TREE_CODE (expr) == SCOPE_REF)
    /* [expr.unary.op]/3 The qualified id of a pointer-to-member must not be
       enclosed in parentheses.  */
    PTRMEM_OK_P (expr) = 0;

  tree stripped_expr = tree_strip_any_location_wrapper (expr);
  if (TREE_CODE (stripped_expr) == STRING_CST)
    PAREN_STRING_LITERAL_P (stripped_expr) = 1;

  expr = cp_expr (force_paren_expr (expr), expr.get_location ());

  return expr;
}

/* Finish a reference to a non-static data member (DECL) that is not
   preceded by `.' or `->'.
*/

tree
finish_non_static_data_member (tree decl, tree object, tree qualifying_scope)
{
  gcc_assert (TREE_CODE (decl) == FIELD_DECL);
  /* Capture this before OBJECT is (possibly) synthesized below.  */
  bool try_omp_private = !object && omp_private_member_map;
  tree ret;

  if (!object)
    {
      tree scope = qualifying_scope;
      if (scope == NULL_TREE)
	{
	  scope = context_for_name_lookup (decl);
	  if (!TYPE_P (scope))
	    {
	      /* Can happen during error recovery (c++/85014).  */
	      gcc_assert (seen_error ());
	      return error_mark_node;
	    }
	}
      object = maybe_dummy_object (scope, NULL);
    }

  object = maybe_resolve_dummy (object, true);
  if (object == error_mark_node)
    return error_mark_node;

  /* DR 613/850: Can use non-static data members without an associated
     object in sizeof/decltype/alignof.  */
  if (is_dummy_object (object)
      && cp_unevaluated_operand == 0
      && (!processing_template_decl || !current_class_ref))
    {
      if (current_function_decl
	  && DECL_STATIC_FUNCTION_P (current_function_decl))
	error ("invalid use of member %qD in static member function", decl);
      else
	error ("invalid use of non-static data member %qD", decl);
      inform (DECL_SOURCE_LOCATION (decl), "declared here");

      return error_mark_node;
    }

  if (current_class_ptr)
    TREE_USED (current_class_ptr) = 1;
  if (processing_template_decl)
    {
      tree type = TREE_TYPE (decl);

      if (TYPE_REF_P (type))
	/* Quals on the object don't matter.  */;
      else if (PACK_EXPANSION_P (type))
	/* Don't bother trying to represent this.  */
	type = NULL_TREE;
      else
	{
	  /* Set the cv qualifiers.  */
	  int quals = cp_type_quals (TREE_TYPE (object));

	  if (DECL_MUTABLE_P (decl))
	    quals &= ~TYPE_QUAL_CONST;

	  quals |= cp_type_quals (TREE_TYPE (decl));
	  type = cp_build_qualified_type (type, quals);
	}

      if (qualifying_scope)
	/* Wrap this in a SCOPE_REF for now.  */
	ret = build_qualified_name (type, qualifying_scope, decl,
				    /*template_p=*/false);
      else
	ret = (convert_from_reference
	       (build_min (COMPONENT_REF, type, object, decl, NULL_TREE)));
    }
  /* If PROCESSING_TEMPLATE_DECL is nonzero here, then
     QUALIFYING_SCOPE is also non-null.  */
  else
    {
      tree access_type = TREE_TYPE (object);

      perform_or_defer_access_check (TYPE_BINFO (access_type), decl,
				     decl, tf_warning_or_error);

      /* If the data member was named `C::M', convert `*this' to `C'
	 first.  */
      if (qualifying_scope)
	{
	  tree binfo = NULL_TREE;
	  object = build_scoped_ref (object, qualifying_scope, &binfo);
	}

      ret = build_class_member_access_expr (object, decl,
					    /*access_path=*/NULL_TREE,
					    /*preserve_reference=*/false,
					    tf_warning_or_error);
    }
  if (try_omp_private)
    {
      tree *v = omp_private_member_map->get (decl);
      if (v)
	ret = convert_from_reference (*v);
    }
  return ret;
}

/* DECL was the declaration to which a qualified-id resolved.  Issue
   an error message if it is not accessible.  If OBJECT_TYPE is
   non-NULL, we have just seen `x->' or `x.' and OBJECT_TYPE is the
   type of `*x', or `x', respectively.  If the DECL was named as
   `A::B' then NESTED_NAME_SPECIFIER is `A'.  Return value is like
   perform_access_checks above.  */

bool
check_accessibility_of_qualified_id (tree decl,
				     tree object_type,
				     tree nested_name_specifier,
				     tsubst_flags_t complain)
{
  /* If we're not checking, return immediately.  */
  if (deferred_access_no_check)
    return true;

  /* Determine the SCOPE of DECL.  */
  tree scope = context_for_name_lookup (decl);
  /* If the SCOPE is not a type, then DECL is not a member.  */
  if (!TYPE_P (scope)
      /* If SCOPE is dependent then we can't perform this access check now,
	 and since we'll perform this access check again after substitution
	 there's no need to explicitly defer it.  */
      || dependent_type_p (scope))
    return true;

  tree qualifying_type = NULL_TREE;
  /* Compute the scope through which DECL is being accessed.  */
  if (object_type
      /* OBJECT_TYPE might not be a class type; consider:

	   class A { typedef int I; };
	   I *p;
	   p->A::I::~I();

	 In this case, we will have "A::I" as the DECL, but "I" as the
	 OBJECT_TYPE.  */
      && CLASS_TYPE_P (object_type)
      && DERIVED_FROM_P (scope, object_type))
    /* If we are processing a `->' or `.' expression, use the type of the
       left-hand side.  */
    qualifying_type = object_type;
  else if (nested_name_specifier)
    {
      /* If the reference is to a non-static member of the
	 current class, treat it as if it were referenced through
	 `this'.  */
      tree ct;
      if (DECL_NONSTATIC_MEMBER_P (decl)
	  && current_class_ptr
	  && DERIVED_FROM_P (scope, ct = current_nonlambda_class_type ()))
	qualifying_type = ct;
      /* Otherwise, use the type indicated by the
	 nested-name-specifier.  */
      else
	qualifying_type = nested_name_specifier;
    }
  else
    /* Otherwise, the name must be from the current class or one of
       its bases.  */
    qualifying_type = currently_open_derived_class (scope);

  if (qualifying_type
      /* It is possible for qualifying type to be a TEMPLATE_TYPE_PARM
	 or similar in a default argument value.  */
      && CLASS_TYPE_P (qualifying_type))
    return perform_or_defer_access_check (TYPE_BINFO (qualifying_type),
					  decl, decl, complain);

  return true;
}

/* EXPR is the result of a qualified-id.  The QUALIFYING_CLASS was the
   class named to the left of the "::" operator.  DONE is true if this
   expression is a complete postfix-expression; it is false if this
   expression is followed by '->', '[', '(', etc.  ADDRESS_P is true
   iff this expression is the operand of '&'.  TEMPLATE_P is true iff
   the qualified-id was of the form "A::template B".  TEMPLATE_ARG_P
   is true iff this qualified name appears as a template argument.  */

tree
finish_qualified_id_expr (tree qualifying_class,
			  tree expr,
			  bool done,
			  bool address_p,
			  bool template_p,
			  bool template_arg_p,
			  tsubst_flags_t complain)
{
  gcc_assert (TYPE_P (qualifying_class));

  if (error_operand_p (expr))
    return error_mark_node;

  if ((DECL_P (expr) || BASELINK_P (expr))
      && !mark_used (expr, complain))
    return error_mark_node;

  if (template_p)
    {
      if (TREE_CODE (expr) == UNBOUND_CLASS_TEMPLATE)
	{
	  /* cp_parser_lookup_name thought we were looking for a type,
	     but we're actually looking for a declaration.  */
	  qualifying_class = TYPE_CONTEXT (expr);
	  expr = TYPE_IDENTIFIER (expr);
	}
      else
	check_template_keyword (expr);
    }

  /* If EXPR occurs as the operand of '&', use special handling that
     permits a pointer-to-member.  */
  if (address_p && done)
    {
      if (TREE_CODE (expr) == SCOPE_REF)
	expr = TREE_OPERAND (expr, 1);
      expr = build_offset_ref (qualifying_class, expr,
			       /*address_p=*/true, complain);
      return expr;
    }

  /* No need to check access within an enum.  */
  if (TREE_CODE (qualifying_class) == ENUMERAL_TYPE
      && TREE_CODE (expr) != IDENTIFIER_NODE)
    return expr;

  /* Within the scope of a class, turn references to non-static
     members into expression of the form "this->...".  */
  if (template_arg_p)
    /* But, within a template argument, we do not want make the
       transformation, as there is no "this" pointer.  */
    ;
  else if (TREE_CODE (expr) == FIELD_DECL)
    {
      push_deferring_access_checks (dk_no_check);
      expr = finish_non_static_data_member (expr, NULL_TREE,
					    qualifying_class);
      pop_deferring_access_checks ();
    }
  else if (BASELINK_P (expr))
    {
      /* See if any of the functions are non-static members.  */
      /* If so, the expression may be relative to 'this'.  */
      if ((type_dependent_expression_p (expr)
	   || !shared_member_p (expr))
	  && current_class_ptr
	  && DERIVED_FROM_P (qualifying_class,
			     current_nonlambda_class_type ()))
	expr = (build_class_member_access_expr
		(maybe_dummy_object (qualifying_class, NULL),
		 expr,
		 BASELINK_ACCESS_BINFO (expr),
		 /*preserve_reference=*/false,
		 complain));
      else if (done)
	/* The expression is a qualified name whose address is not
	   being taken.  */
	expr = build_offset_ref (qualifying_class, expr,
				 /*address_p=*/false, complain);
    }
  else if (!template_p
	   && TREE_CODE (expr) == TEMPLATE_DECL
	   && !DECL_FUNCTION_TEMPLATE_P (expr))
    {
      if (complain & tf_error)
	error ("%qE missing template arguments", expr);
      return error_mark_node;
    }
  else
    {
      /* In a template, return a SCOPE_REF for most qualified-ids
	 so that we can check access at instantiation time.  But if
	 we're looking at a member of the current instantiation, we
	 know we have access and building up the SCOPE_REF confuses
	 non-type template argument handling.  */
      if (processing_template_decl
	  && (!currently_open_class (qualifying_class)
	      || TREE_CODE (expr) == IDENTIFIER_NODE
	      || TREE_CODE (expr) == TEMPLATE_ID_EXPR
	      || TREE_CODE (expr) == BIT_NOT_EXPR))
	expr = build_qualified_name (TREE_TYPE (expr),
				     qualifying_class, expr,
				     template_p);
      else if (tree wrap = maybe_get_tls_wrapper_call (expr))
	expr = wrap;

      expr = convert_from_reference (expr);
    }

  return expr;
}

/* Begin a statement-expression.  The value returned must be passed to
   finish_stmt_expr.  */

tree
begin_stmt_expr (void)
{
  return push_stmt_list ();
}

/* Process the final expression of a statement expression. EXPR can be
   NULL, if the final expression is empty.  Return a STATEMENT_LIST
   containing all the statements in the statement-expression, or
   ERROR_MARK_NODE if there was an error.  */

tree
finish_stmt_expr_expr (tree expr, tree stmt_expr)
{
  if (error_operand_p (expr))
    {
      /* The type of the statement-expression is the type of the last
	 expression.  */
      TREE_TYPE (stmt_expr) = error_mark_node;
      return error_mark_node;
    }

  /* If the last statement does not have "void" type, then the value
     of the last statement is the value of the entire expression.  */
  if (expr)
    {
      tree type = TREE_TYPE (expr);

      if (type && type_unknown_p (type))
	{
	  error ("a statement expression is an insufficient context"
		 " for overload resolution");
	  TREE_TYPE (stmt_expr) = error_mark_node;
	  return error_mark_node;
	}
      else if (processing_template_decl)
	{
	  expr = build_stmt (input_location, EXPR_STMT, expr);
	  expr = add_stmt (expr);
	  /* Mark the last statement so that we can recognize it as such at
	     template-instantiation time.  */
	  EXPR_STMT_STMT_EXPR_RESULT (expr) = 1;
	}
      else if (VOID_TYPE_P (type))
	{
	  /* Just treat this like an ordinary statement.  */
	  expr = finish_expr_stmt (expr);
	}
      else
	{
	  /* It actually has a value we need to deal with.  First, force it
	     to be an rvalue so that we won't need to build up a copy
	     constructor call later when we try to assign it to something.  */
	  expr = force_rvalue (expr, tf_warning_or_error);
	  if (error_operand_p (expr))
	    return error_mark_node;

	  /* Update for array-to-pointer decay.  */
	  type = TREE_TYPE (expr);

	  /* Wrap it in a CLEANUP_POINT_EXPR and add it to the list like a
	     normal statement, but don't convert to void or actually add
	     the EXPR_STMT.  */
	  if (TREE_CODE (expr) != CLEANUP_POINT_EXPR)
	    expr = maybe_cleanup_point_expr (expr);
	  add_stmt (expr);
	}

      /* The type of the statement-expression is the type of the last
	 expression.  */
      TREE_TYPE (stmt_expr) = type;
    }

  return stmt_expr;
}

/* Finish a statement-expression.  EXPR should be the value returned
   by the previous begin_stmt_expr.  Returns an expression
   representing the statement-expression.  */

tree
finish_stmt_expr (tree stmt_expr, bool has_no_scope)
{
  tree type;
  tree result;

  if (error_operand_p (stmt_expr))
    {
      pop_stmt_list (stmt_expr);
      return error_mark_node;
    }

  gcc_assert (TREE_CODE (stmt_expr) == STATEMENT_LIST);

  type = TREE_TYPE (stmt_expr);
  result = pop_stmt_list (stmt_expr);
  TREE_TYPE (result) = type;

  if (processing_template_decl)
    {
      result = build_min (STMT_EXPR, type, result);
      TREE_SIDE_EFFECTS (result) = 1;
      STMT_EXPR_NO_SCOPE (result) = has_no_scope;
    }
  else if (CLASS_TYPE_P (type))
    {
      /* Wrap the statement-expression in a TARGET_EXPR so that the
	 temporary object created by the final expression is destroyed at
	 the end of the full-expression containing the
	 statement-expression.  */
      result = force_target_expr (type, result, tf_warning_or_error);
    }

  return result;
}

/* Returns the expression which provides the value of STMT_EXPR.
*/

tree
stmt_expr_value_expr (tree stmt_expr)
{
  /* Peel away the wrappers that may surround the final expression:
     an outer BIND_EXPR, the trailing entry of a STATEMENT_LIST, and
     the EXPR_STMT itself.  */
  tree t = STMT_EXPR_STMT (stmt_expr);

  if (TREE_CODE (t) == BIND_EXPR)
    t = BIND_EXPR_BODY (t);

  if (TREE_CODE (t) == STATEMENT_LIST && STATEMENT_LIST_TAIL (t))
    t = STATEMENT_LIST_TAIL (t)->stmt;

  if (TREE_CODE (t) == EXPR_STMT)
    t = EXPR_STMT_EXPR (t);

  return t;
}

/* Return TRUE iff EXPR_STMT is an empty list of
   expression statements.  */

bool
empty_expr_stmt_p (tree expr_stmt)
{
  tree body = NULL_TREE;

  if (expr_stmt == void_node)
    return true;

  if (expr_stmt)
    {
      if (TREE_CODE (expr_stmt) == EXPR_STMT)
	body = EXPR_STMT_EXPR (expr_stmt);
      else if (TREE_CODE (expr_stmt) == STATEMENT_LIST)
	body = expr_stmt;
    }

  if (body)
    {
      if (TREE_CODE (body) == STATEMENT_LIST)
	return tsi_end_p (tsi_start (body));
      else
	/* Recurse through nested EXPR_STMT/STATEMENT_LIST wrappers.  */
	return empty_expr_stmt_p (body);
    }
  return false;
}

/* Perform Koenig lookup.  FN_EXPR is the postfix-expression representing
   the function (or functions) to call; ARGS are the arguments to the
   call.  Returns the functions to be considered by overload resolution.
   COMPLAIN controls whether a failed lookup is diagnosed.  */

cp_expr
perform_koenig_lookup (cp_expr fn_expr, vec<tree, va_gc> *args,
		       tsubst_flags_t complain)
{
  tree identifier = NULL_TREE;
  tree functions = NULL_TREE;
  tree tmpl_args = NULL_TREE;
  bool template_id = false;
  location_t loc = fn_expr.get_location ();
  tree fn = fn_expr.get_value ();

  STRIP_ANY_LOCATION_WRAPPER (fn);

  if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
    {
      /* Use a separate flag to handle null args.  */
      template_id = true;
      tmpl_args = TREE_OPERAND (fn, 1);
      fn = TREE_OPERAND (fn, 0);
    }

  /* Find the name of the overloaded function.  */
  if (identifier_p (fn))
    identifier = fn;
  else
    {
      functions = fn;
      identifier = OVL_NAME (functions);
    }

  /* A call to a namespace-scope function using an unqualified name.

     Do Koenig lookup -- unless any of the arguments are
     type-dependent.  */
  if (!any_type_dependent_arguments_p (args)
      && !any_dependent_template_arguments_p (tmpl_args))
    {
      fn = lookup_arg_dependent (identifier, functions, args);
      if (!fn)
	{
	  /* The unqualified name could not be resolved.  */
	  if (complain & tf_error)
	    fn = unqualified_fn_lookup_error (cp_expr (identifier, loc));
	  else
	    fn = identifier;
	}
    }

  /* Re-wrap the template arguments around the looked-up functions.  */
  if (fn && template_id && fn != error_mark_node)
    fn = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fn, tmpl_args);

  return cp_expr (fn, loc);
}

/* Generate an expression for `FN (ARGS)'.  This may change the
   contents of ARGS.

   If DISALLOW_VIRTUAL is true, the call to FN will be not generated
   as a virtual call, even if FN is virtual.  (This flag is set when
   encountering an expression where the function name is explicitly
   qualified.  For example a call to `X::f' never generates a virtual
   call.)

   Returns code for the call.  */

tree
finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual,
		  bool koenig_p, tsubst_flags_t complain)
{
  tree result;
  tree orig_fn;
  vec<tree, va_gc> *orig_args = *args;

  if (fn == error_mark_node)
    return error_mark_node;

  gcc_assert (!TYPE_P (fn));

  /* If FN may be a FUNCTION_DECL obfuscated by force_paren_expr, undo
     it so that we can tell this is a call to a known function.  */
  fn = maybe_undo_parenthesized_ref (fn);

  STRIP_ANY_LOCATION_WRAPPER (fn);

  orig_fn = fn;

  if (processing_template_decl)
    {
      /* If FN is a local extern declaration or set thereof, look them up
	 again at instantiation time.  */
      if (is_overloaded_fn (fn))
	{
	  tree ifn = get_first_fn (fn);
	  if (TREE_CODE (ifn) == FUNCTION_DECL
	      && DECL_LOCAL_DECL_P (ifn))
	    orig_fn = DECL_NAME (ifn);
	}

      /* If the call expression is dependent, build a CALL_EXPR node
	 with no type; type_dependent_expression_p recognizes
	 expressions with no type as being dependent.
*/
      if (type_dependent_expression_p (fn)
	  || any_type_dependent_arguments_p (*args))
	{
	  result = build_min_nt_call_vec (orig_fn, *args);
	  SET_EXPR_LOCATION (result, cp_expr_loc_or_input_loc (fn));
	  KOENIG_LOOKUP_P (result) = koenig_p;
	  if (is_overloaded_fn (fn))
	    fn = get_fns (fn);

	  if (cfun)
	    {
	      /* ABNORMAL is true only if every candidate is a noreturn
		 FUNCTION_DECL (TREE_THIS_VOLATILE).  */
	      bool abnormal = true;
	      for (lkp_iterator iter (fn); abnormal && iter; ++iter)
		{
		  tree fndecl = STRIP_TEMPLATE (*iter);
		  if (TREE_CODE (fndecl) != FUNCTION_DECL
		      || !TREE_THIS_VOLATILE (fndecl))
		    abnormal = false;
		}
	      /* FIXME: Stop warning about falling off end of non-void
		 function.   But this is wrong.  Even if we only see
		 no-return fns at this point, we could select a
		 future-defined return fn during instantiation.  Or
		 vice-versa.  */
	      if (abnormal)
		current_function_returns_abnormally = 1;
	    }
	  return result;
	}
      /* Keep a copy of the original arguments for rebuilding the call
	 at the end of this function.  */
      orig_args = make_tree_vector_copy (*args);
      if (!BASELINK_P (fn)
	  && TREE_CODE (fn) != PSEUDO_DTOR_EXPR
	  && TREE_TYPE (fn) != unknown_type_node)
	fn = build_non_dependent_expr (fn);
      make_args_non_dependent (*args);
    }

  if (TREE_CODE (fn) == COMPONENT_REF)
    {
      tree member = TREE_OPERAND (fn, 1);
      if (BASELINK_P (member))
	{
	  /* A call of the form obj.fn (...): dispatch to the member
	     call machinery.  */
	  tree object = TREE_OPERAND (fn, 0);
	  return build_new_method_call (object, member,
					args, NULL_TREE,
					(disallow_virtual
					 ? LOOKUP_NORMAL | LOOKUP_NONVIRTUAL
					 : LOOKUP_NORMAL),
					/*fn_p=*/NULL,
					complain);
	}
    }

  /* Per 13.3.1.1, '(&f)(...)' is the same as '(f)(...)'.  */
  if (TREE_CODE (fn) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (fn, 0)) == OVERLOAD)
    fn = TREE_OPERAND (fn, 0);

  if (is_overloaded_fn (fn))
    fn = baselink_for_fns (fn);

  result = NULL_TREE;
  if (BASELINK_P (fn))
    {
      tree object;

      /* A call to a member function.  From [over.call.func]:

	   If the keyword this is in scope and refers to the class of
	   that member function, or a derived class thereof, then the
	   function call is transformed into a qualified function call
	   using (*this) as the postfix-expression to the left of the
	   . operator.... [Otherwise] a contrived object of type T
	   becomes the implied object argument.

	 In this situation:

	   struct A { void f(); };
	   struct B : public A {};
	   struct C : public A { void g() { B::f(); }};

	 "the class of that member function" refers to `A'.  But 11.2
	 [class.access.base] says that we need to convert 'this' to B* as
	 part of the access, so we pass 'B' to maybe_dummy_object.  */

      if (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (get_first_fn (fn)))
	{
	  /* A constructor call always uses a dummy object.  (This constructor
	     call which has the form A::A () is actually invalid and we are
	     going to reject it later in build_new_method_call.)  */
	  object = build_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)));
	}
      else
	object = maybe_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)),
				     NULL);

      result = build_new_method_call (object, fn, args, NULL_TREE,
				      (disallow_virtual
				       ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL
				       : LOOKUP_NORMAL),
				      /*fn_p=*/NULL,
				      complain);
    }
  else if (concept_check_p (fn))
    {
      /* FN is actually a template-id referring to a concept definition.  */
      tree id = unpack_concept_check (fn);
      tree tmpl = TREE_OPERAND (id, 0);
      tree args = TREE_OPERAND (id, 1);

      if (!function_concept_p (tmpl))
	{
	  error_at (EXPR_LOC_OR_LOC (fn, input_location),
		    "cannot call a concept as a function");
	  return error_mark_node;
	}

      /* Ensure the result is wrapped as a call expression.  */
      result = build_concept_check (tmpl, args, tf_warning_or_error);
    }
  else if (is_overloaded_fn (fn))
    {
      /* If the function is an overloaded builtin, resolve it.  */
      if (TREE_CODE (fn) == FUNCTION_DECL
	  && (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
	      || DECL_BUILT_IN_CLASS (fn) == BUILT_IN_MD))
	result = resolve_overloaded_builtin (input_location, fn, *args);

      if (!result)
	{
	  if (warn_sizeof_pointer_memaccess
	      && (complain & tf_warning)
	      && !vec_safe_is_empty (*args)
	      && !processing_template_decl)
	    {
	      /* Collect up to the first three arguments that are
		 SIZEOF_EXPRs, for -Wsizeof-pointer-memaccess.  */
	      location_t sizeof_arg_loc[3];
	      tree sizeof_arg[3];
	      unsigned int i;
	      for (i = 0; i < 3; i++)
		{
		  tree t;

		  sizeof_arg_loc[i] = UNKNOWN_LOCATION;
		  sizeof_arg[i] = NULL_TREE;
		  if (i >= (*args)->length ())
		    continue;
		  t = (**args)[i];
		  if (TREE_CODE (t) != SIZEOF_EXPR)
		    continue;
		  if (SIZEOF_EXPR_TYPE_P (t))
		    sizeof_arg[i] = TREE_TYPE (TREE_OPERAND (t, 0));
		  else
		    sizeof_arg[i] = TREE_OPERAND (t, 0);
		  sizeof_arg_loc[i] = EXPR_LOCATION (t);
		}
	      sizeof_pointer_memaccess_warning (sizeof_arg_loc, fn, *args,
						sizeof_arg,
						same_type_ignoring_top_level_qualifiers_p);
	    }

	  if ((complain & tf_warning)
	      && TREE_CODE (fn) == FUNCTION_DECL
	      && fndecl_built_in_p (fn, BUILT_IN_MEMSET)
	      && vec_safe_length (*args) == 3
	      && !any_type_dependent_arguments_p (*args))
	    {
	      /* Warn about suspicious memset calls such as transposed
		 fill/length arguments.  */
	      tree arg0 = (*orig_args)[0];
	      tree arg1 = (*orig_args)[1];
	      tree arg2 = (*orig_args)[2];
	      int literal_mask = ((literal_integer_zerop (arg1) << 1)
				  | (literal_integer_zerop (arg2) << 2));
	      warn_for_memset (input_location, arg0, arg2, literal_mask);
	    }

	  /* A call to a namespace-scope function.  */
	  result = build_new_function_call (fn, args, complain);
	}
    }
  else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR)
    {
      if (!vec_safe_is_empty (*args))
	error ("arguments to destructor are not allowed");
      /* C++20/DR: If the postfix-expression names a pseudo-destructor (in
	 which case the postfix-expression is a possibly-parenthesized class
	 member access), the function call destroys the object of scalar type
	 denoted by the object expression of the class member access.  */
      tree ob = TREE_OPERAND (fn, 0);
      if (obvalue_p (ob))
	result = build_trivial_dtor_call (ob, true);
      else
	/* No location to clobber.  */
	result = convert_to_void (ob, ICV_STATEMENT, complain);
    }
  else if (CLASS_TYPE_P (TREE_TYPE (fn)))
    /* If the "function" is really an object of class type, it might
       have an overloaded `operator ()'.  */
    result = build_op_call (fn, args, complain);

  if (!result)
    /* A call where the function is unknown.  */
    result = cp_build_function_call_vec (fn, args, complain);

  if (processing_template_decl && result != error_mark_node)
    {
      /* Rebuild the call with the original (dependent) function and
	 arguments so it can be re-processed at instantiation time.  */
      if (INDIRECT_REF_P (result))
	result = TREE_OPERAND (result, 0);
      result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args);
      SET_EXPR_LOCATION (result, input_location);
      KOENIG_LOOKUP_P (result) = koenig_p;
      release_tree_vector (orig_args);
      result = convert_from_reference (result);
    }

  return result;
}

/* Finish a call to a postfix increment or decrement or EXPR.  (Which
   is indicated by CODE, which should be POSTINCREMENT_EXPR or
   POSTDECREMENT_EXPR.)  */

cp_expr
finish_increment_expr (cp_expr expr, enum tree_code code)
{
  /* input_location holds the location of the trailing operator token.
     Build a location of the form:
       expr++
       ~~~~^~
     with the caret at the operator token, ranging from the start
     of EXPR to the end of the operator token.  */
  location_t combined_loc = make_location (input_location,
					   expr.get_start (),
					   get_finish (input_location));
  cp_expr result = build_x_unary_op (combined_loc, code, expr,
				     tf_warning_or_error);
  /* TODO: build_x_unary_op doesn't honor the location, so set it here.  */
  result.set_location (combined_loc);
  return result;
}

/* Finish a use of `this'.  Returns an expression for `this'.  */

tree
finish_this_expr (void)
{
  tree result = NULL_TREE;

  if (current_class_ptr)
    {
      tree type = TREE_TYPE (current_class_ref);

      /* In a lambda expression, 'this' refers to the captured 'this'.  */
      if (LAMBDA_TYPE_P (type))
	result = lambda_expr_this_capture (CLASSTYPE_LAMBDA_EXPR (type), true);
      else
	result = current_class_ptr;
    }

  if (result)
    /* The keyword 'this' is a prvalue expression.
*/
    return rvalue (result);

  /* No `this' is available: diagnose why.  */
  tree fn = current_nonlambda_function ();
  if (fn && DECL_STATIC_FUNCTION_P (fn))
    error ("%<this%> is unavailable for static member functions");
  else if (fn)
    error ("invalid use of %<this%> in non-member function");
  else
    error ("invalid use of %<this%> at top level");
  return error_mark_node;
}

/* Finish a pseudo-destructor expression.  If SCOPE is NULL, the
   expression was of the form `OBJECT.~DESTRUCTOR' where DESTRUCTOR is
   the TYPE for the type given.  If SCOPE is non-NULL, the expression
   was of the form `OBJECT.SCOPE::~DESTRUCTOR'.  LOC is the location
   used for diagnostics and the resulting expression.  */

tree
finish_pseudo_destructor_expr (tree object, tree scope, tree destructor,
			       location_t loc)
{
  if (object == error_mark_node || destructor == error_mark_node)
    return error_mark_node;

  gcc_assert (TYPE_P (destructor));

  if (!processing_template_decl)
    {
      if (scope == error_mark_node)
	{
	  error_at (loc, "invalid qualifying scope in pseudo-destructor name");
	  return error_mark_node;
	}
      if (is_auto (destructor))
	/* ~auto: deduce the destroyed type from the object.  */
	destructor = TREE_TYPE (object);
      if (scope && TYPE_P (scope) && !check_dtor_name (scope, destructor))
	{
	  error_at (loc,
		    "qualified type %qT does not match destructor name ~%qT",
		    scope, destructor);
	  return error_mark_node;
	}

      /* [expr.pseudo] says both:

	   The type designated by the pseudo-destructor-name shall be
	   the same as the object type.

	 and:

	   The cv-unqualified versions of the object type and of the
	   type designated by the pseudo-destructor-name shall be the
	   same type.

	 We implement the more generous second sentence, since that is
	 what most other compilers do.  */
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (object),
						      destructor))
	{
	  error_at (loc, "%qE is not of type %qT", object, destructor);
	  return error_mark_node;
	}
    }

  /* A type-dependent object gives the expression no type yet.  */
  tree type = (type_dependent_expression_p (object)
	       ? NULL_TREE : void_type_node);

  return build3_loc (loc, PSEUDO_DTOR_EXPR, type, object,
		     scope, destructor);
}

/* Finish an expression of the form CODE EXPR.  */

cp_expr
finish_unary_op_expr (location_t op_loc, enum tree_code code, cp_expr expr,
		      tsubst_flags_t complain)
{
  /* Build a location of the form:
       ++expr
       ^~~~~~
     with the caret at the operator token, ranging from the start
     of the operator token to the end of EXPR.  */
  location_t combined_loc = make_location (op_loc,
					   op_loc, expr.get_finish ());
  cp_expr result = build_x_unary_op (combined_loc, code, expr, complain);
  /* TODO: build_x_unary_op doesn't always honor the location.  */
  result.set_location (combined_loc);

  if (result == error_mark_node)
    return result;

  if (!(complain & tf_warning))
    return result;

  /* Warn if the operation overflowed on a constant operand that did
     not itself overflow (e.g. negating INT_MIN).  */
  tree result_ovl = result;
  tree expr_ovl = expr;

  if (!processing_template_decl)
    expr_ovl = cp_fully_fold (expr_ovl);

  if (!CONSTANT_CLASS_P (expr_ovl)
      || TREE_OVERFLOW_P (expr_ovl))
    return result;

  if (!processing_template_decl)
    result_ovl = cp_fully_fold (result_ovl);

  if (CONSTANT_CLASS_P (result_ovl) && TREE_OVERFLOW_P (result_ovl))
    overflow_warning (combined_loc, result_ovl);

  return result;
}

/* Finish a compound-literal expression or C++11 functional cast with aggregate
   initializer.  TYPE is the type to which the CONSTRUCTOR in COMPOUND_LITERAL
   is being cast.  FCL_CONTEXT records whether this came from the C99
   compound-literal extension (fcl_c99) or C++ syntax.  */

tree
finish_compound_literal (tree type, tree compound_literal,
			 tsubst_flags_t complain,
			 fcl_t fcl_context)
{
  if (type == error_mark_node)
    return error_mark_node;

  if (TYPE_REF_P (type))
    {
      /* Build the compound literal of the referenced type first...  */
      compound_literal
	= finish_compound_literal (TREE_TYPE (type), compound_literal,
				   complain, fcl_context);
      /* The prvalue is then used to direct-initialize the reference.
*/
      tree r = (perform_implicit_conversion_flags
		(type, compound_literal, complain, LOOKUP_NORMAL));
      return convert_from_reference (r);
    }

  if (!TYPE_OBJ_P (type))
    {
      if (complain & tf_error)
	error ("compound literal of non-object type %qT", type);
      return error_mark_node;
    }

  if (tree anode = type_uses_auto (type))
    if (CLASS_PLACEHOLDER_TEMPLATE (anode))
      {
	/* Class template argument deduction for e.g. X{...}.  */
	type = do_auto_deduction (type, compound_literal, anode, complain,
				  adc_variable_type);
	if (type == error_mark_node)
	  return error_mark_node;
      }

  /* Used to hold a copy of the compound literal in a template.  */
  tree orig_cl = NULL_TREE;

  if (processing_template_decl)
    {
      const bool dependent_p
	= (instantiation_dependent_expression_p (compound_literal)
	   || dependent_type_p (type));
      if (dependent_p)
	/* We're about to return, no need to copy.  */
	orig_cl = compound_literal;
      else
	/* We're going to need a copy.  */
	orig_cl = unshare_constructor (compound_literal);
      TREE_TYPE (orig_cl) = type;
      /* Mark the expression as a compound literal.  */
      TREE_HAS_CONSTRUCTOR (orig_cl) = 1;
      /* And as instantiation-dependent.  */
      CONSTRUCTOR_IS_DEPENDENT (orig_cl) = dependent_p;
      if (fcl_context == fcl_c99)
	CONSTRUCTOR_C99_COMPOUND_LITERAL (orig_cl) = 1;
      /* If the compound literal is dependent, we're done for now.  */
      if (dependent_p)
	return orig_cl;
      /* Otherwise, do go on to e.g. check narrowing.  */
    }

  type = complete_type (type);

  if (TYPE_NON_AGGREGATE_CLASS (type))
    {
      /* Trying to deal with a CONSTRUCTOR instead of a TREE_LIST
	 everywhere that deals with function arguments would be a pain, so
	 just wrap it in a TREE_LIST.  The parser set a flag so we know
	 that it came from T{} rather than T({}).  */
      CONSTRUCTOR_IS_DIRECT_INIT (compound_literal) = 1;
      compound_literal = build_tree_list (NULL_TREE, compound_literal);
      return build_functional_cast (input_location, type,
				    compound_literal, complain);
    }

  if (TREE_CODE (type) == ARRAY_TYPE
      && check_array_initializer (NULL_TREE, type, compound_literal))
    return error_mark_node;
  compound_literal = reshape_init (type, compound_literal, complain);
  if (SCALAR_TYPE_P (type)
      && !BRACE_ENCLOSED_INITIALIZER_P (compound_literal))
    {
      /* E.g. int{42}: check for narrowing on the single scalar value.  */
      tree t = instantiate_non_dependent_expr_sfinae (compound_literal,
						      complain);
      if (!check_narrowing (type, t, complain))
	return error_mark_node;
    }
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE)
    {
      /* Deduce the array bound from the initializer, e.g. int[]{1,2}.  */
      cp_complete_array_type_or_error (&type, compound_literal,
				       false, complain);
      if (type == error_mark_node)
	return error_mark_node;
    }
  compound_literal = digest_init_flags (type, compound_literal,
					LOOKUP_NORMAL | LOOKUP_NO_NARROWING,
					complain);
  if (compound_literal == error_mark_node)
    return error_mark_node;

  /* If we're in a template, return the original compound literal.  */
  if (orig_cl)
    {
      if (!VECTOR_TYPE_P (type))
	return get_target_expr_sfinae (orig_cl, complain);
      else
	return orig_cl;
    }

  if (TREE_CODE (compound_literal) == CONSTRUCTOR)
    {
      TREE_HAS_CONSTRUCTOR (compound_literal) = true;
      if (fcl_context == fcl_c99)
	CONSTRUCTOR_C99_COMPOUND_LITERAL (compound_literal) = 1;
    }

  /* Put static/constant array temporaries in static variables.  */
  /* FIXME all C99 compound literals should be variables rather than C++
     temporaries, unless they are used as an aggregate initializer.  */
  if ((!at_function_scope_p () || CP_TYPE_CONST_P (type))
      && fcl_context == fcl_c99
      && TREE_CODE (type) == ARRAY_TYPE
      && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
      && initializer_constant_valid_p (compound_literal, type))
    {
      tree decl = create_temporary_var (type);
      DECL_CONTEXT (decl) = NULL_TREE;
      DECL_INITIAL (decl) = compound_literal;
      TREE_STATIC (decl) = 1;
      if (literal_type_p (type) && CP_TYPE_CONST_NON_VOLATILE_P (type))
	{
	  /* 5.19 says that a constant expression can include an
	     lvalue-rvalue conversion applied to "a glvalue of literal type
	     that refers to a non-volatile temporary object initialized
	     with a constant expression".  Rather than try to communicate
	     that this VAR_DECL is a temporary, just mark it constexpr.  */
	  DECL_DECLARED_CONSTEXPR_P (decl) = true;
	  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true;
	  TREE_CONSTANT (decl) = true;
	}
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
      decl = pushdecl_top_level (decl);
      DECL_NAME (decl) = make_anon_name ();
      SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
      /* Make sure the destructor is callable.  */
      tree clean = cxx_maybe_build_cleanup (decl, complain);
      if (clean == error_mark_node)
	return error_mark_node;
      return decl;
    }

  /* Represent other compound literals with TARGET_EXPR so we produce
     an lvalue, but can elide copies.  */
  if (!VECTOR_TYPE_P (type))
    compound_literal = get_target_expr_sfinae (compound_literal, complain);

  return compound_literal;
}

/* Return the declaration for the function-name variable indicated by
   ID (one of __FUNCTION__, __PRETTY_FUNCTION__, or __func__).  */

tree
finish_fname (tree id)
{
  tree decl;

  decl = fname_decl (input_location, C_RID_CODE (id), id);
  if (processing_template_decl && current_function_decl
      && decl != error_mark_node)
    /* In a template, defer the real declaration until instantiation:
       just keep the name.  */
    decl = DECL_NAME (decl);
  return decl;
}

/* Finish a translation unit.  */

void
finish_translation_unit (void)
{
  /* In case there were missing closebraces,
     get us back to the global binding level.
*/
  pop_everything ();
  while (current_namespace != global_namespace)
    pop_namespace ();

  /* Do file scope __FUNCTION__ et al.  */
  finish_fname_decls ();

  /* Diagnose an unterminated '#pragma omp declare target' region.  */
  if (scope_chain->omp_declare_target_attribute)
    {
      if (!errorcount)
	error ("%<#pragma omp declare target%> without corresponding "
	       "%<#pragma omp end declare target%>");
      scope_chain->omp_declare_target_attribute = 0;
    }
}

/* Finish a template type parameter, specified as AGGR IDENTIFIER.
   Returns the parameter.  */

tree
finish_template_type_parm (tree aggr, tree identifier)
{
  if (aggr != class_type_node)
    {
      permerror (input_location, "template type parameters must use the keyword %<class%> or %<typename%>");
      aggr = class_type_node;
    }

  return build_tree_list (aggr, identifier);
}

/* Finish a template template parameter, specified as AGGR IDENTIFIER.
   Returns the parameter.  */

tree
finish_template_template_parm (tree aggr, tree identifier)
{
  tree decl = build_decl (input_location,
			  TYPE_DECL, identifier, NULL_TREE);

  tree tmpl = build_lang_decl (TEMPLATE_DECL, identifier, NULL_TREE);
  DECL_TEMPLATE_PARMS (tmpl) = current_template_parms;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  DECL_ARTIFICIAL (decl) = 1;

  /* Associate the constraints with the underlying declaration,
     not the template.  */
  tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
  tree constr = build_constraints (reqs, NULL_TREE);
  set_constraints (decl, constr);

  end_template_decl ();

  gcc_assert (DECL_TEMPLATE_PARMS (tmpl));

  check_default_tmpl_args (decl, DECL_TEMPLATE_PARMS (tmpl),
			   /*is_primary=*/true, /*is_partial=*/false,
			   /*is_friend=*/0);

  return finish_template_type_parm (aggr, tmpl);
}

/* ARGUMENT is the default-argument value for a template template
   parameter.  If ARGUMENT is invalid, issue error messages and return
   the ERROR_MARK_NODE.  Otherwise, ARGUMENT itself is returned.  */

tree
check_template_template_default_arg (tree argument)
{
  if (TREE_CODE (argument) != TEMPLATE_DECL
      && TREE_CODE (argument) != TEMPLATE_TEMPLATE_PARM
      && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
    {
      if (TREE_CODE (argument) == TYPE_DECL)
	error ("invalid use of type %qT as a default value for a template "
	       "template-parameter", TREE_TYPE (argument));
      else
	error ("invalid default argument for a template template parameter");
      return error_mark_node;
    }

  return argument;
}

/* Begin a class definition, as indicated by T.  Returns the (possibly
   replaced) class type, or error_mark_node on invalid definitions.  */

tree
begin_class_definition (tree t)
{
  if (error_operand_p (t) || error_operand_p (TYPE_MAIN_DECL (t)))
    return error_mark_node;

  if (processing_template_parmlist && !LAMBDA_TYPE_P (t))
    {
      error ("definition of %q#T inside template parameter list", t);
      return error_mark_node;
    }

  /* According to the C++ ABI, decimal classes defined in ISO/IEC TR 24733
     are passed the same as decimal scalar types.  */
  if (TREE_CODE (t) == RECORD_TYPE
      && !processing_template_decl)
    {
      tree ns = TYPE_CONTEXT (t);
      if (ns && TREE_CODE (ns) == NAMESPACE_DECL
	  && DECL_CONTEXT (ns) == std_node
	  && DECL_NAME (ns)
	  && id_equal (DECL_NAME (ns), "decimal"))
	{
	  const char *n = TYPE_NAME_STRING (t);
	  if ((strcmp (n, "decimal32") == 0)
	      || (strcmp (n, "decimal64") == 0)
	      || (strcmp (n, "decimal128") == 0))
	    TYPE_TRANSPARENT_AGGR (t) = 1;
	}
    }

  /* A non-implicit typename comes from code like:

       template <typename T> struct A {
	 template <typename U> struct A<T>::B ...

     This is erroneous.  */
  else if (TREE_CODE (t) == TYPENAME_TYPE)
    {
      error ("invalid definition of qualified type %qT", t);
      t = error_mark_node;
    }

  /* If T is unusable, substitute a fresh anonymous class so parsing
     can continue.  */
  if (t == error_mark_node || ! MAYBE_CLASS_TYPE_P (t))
    {
      t = make_class_type (RECORD_TYPE);
      pushtag (make_anon_name (), t);
    }

  if (TYPE_BEING_DEFINED (t))
    {
      /* Redefinition while the first definition is still open: make a
	 distinct type so the nested definition does not corrupt it.  */
      t = make_class_type (TREE_CODE (t));
      pushtag (TYPE_IDENTIFIER (t), t);
    }
  maybe_process_partial_specialization (t);
  pushclass (t);
  TYPE_BEING_DEFINED (t) = 1;
  class_binding_level->defining_class_p = 1;

  if (flag_pack_struct)
    {
      tree v;
      TYPE_PACKED (t) = 1;
      /* Even though the type is being defined for the first time
	 here, there might have been a forward declaration, so there
	 might be cv-qualified variants of T.  */
      for (v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v))
	TYPE_PACKED (v) = 1;
    }
  /* Reset the interface data, at the earliest possible
     moment, as it might have been set via a class foo;
     before.  */
  if (! TYPE_UNNAMED_P (t))
    {
      struct c_fileinfo *finfo = \
	get_fileinfo (LOCATION_FILE (input_location));
      CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only;
      SET_CLASSTYPE_INTERFACE_UNKNOWN_X
	(t, finfo->interface_unknown);
    }
  reset_specialization ();

  /* Make a declaration for this class in its own scope.  */
  build_self_reference ();

  return t;
}

/* Finish the member declaration given by DECL.  */

void
finish_member_declaration (tree decl)
{
  if (decl == error_mark_node || decl == NULL_TREE)
    return;

  if (decl == void_type_node)
    /* The COMPONENT was a friend, not a member, and so there's
       nothing for us to do.  */
    return;

  /* We should see only one DECL at a time.  */
  gcc_assert (DECL_CHAIN (decl) == NULL_TREE);

  /* Don't add decls after definition.  */
  gcc_assert (TYPE_BEING_DEFINED (current_class_type)
	      /* We can add lambda types when late parsing default
		 arguments.  */
	      || LAMBDA_TYPE_P (TREE_TYPE (decl)));

  /* Set up access control for DECL.
*/
  TREE_PRIVATE (decl)
    = (current_access_specifier == access_private_node);
  TREE_PROTECTED (decl)
    = (current_access_specifier == access_protected_node);
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      /* Mirror the access on the templated declaration itself.  */
      TREE_PRIVATE (DECL_TEMPLATE_RESULT (decl)) = TREE_PRIVATE (decl);
      TREE_PROTECTED (DECL_TEMPLATE_RESULT (decl)) = TREE_PROTECTED (decl);
    }

  /* Mark the DECL as a member of the current class, unless it's
     a member of an enumeration.  */
  if (TREE_CODE (decl) != CONST_DECL)
    DECL_CONTEXT (decl) = current_class_type;

  if (TREE_CODE (decl) == USING_DECL)
    /* For now, ignore class-scope USING_DECLS, so that debugging
       backends do not see them.  */
    DECL_IGNORED_P (decl) = 1;

  /* Check for bare parameter packs in the non-static data member
     declaration.  */
  if (TREE_CODE (decl) == FIELD_DECL)
    {
      if (check_for_bare_parameter_packs (TREE_TYPE (decl)))
	TREE_TYPE (decl) = error_mark_node;
      if (check_for_bare_parameter_packs (DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl) = NULL_TREE;
    }

  /* [dcl.link]

     A C language linkage is ignored for the names of class members
     and the member function type of class member functions.  */
  if (DECL_LANG_SPECIFIC (decl))
    SET_DECL_LANGUAGE (decl, lang_cplusplus);

  bool add = false;

  /* Functions and non-functions are added differently.  */
  if (DECL_DECLARES_FUNCTION_P (decl))
    add = add_method (current_class_type, decl, false);
  /* Enter the DECL into the scope of the class, if the class
     isn't a closure (whose fields are supposed to be unnamed).  */
  else if (CLASSTYPE_LAMBDA_EXPR (current_class_type)
	   || pushdecl_class_level (decl))
    add = true;

  if (add)
    {
      /* All TYPE_DECLs go at the end of TYPE_FIELDS.  Ordinary fields
	 go at the beginning.  The reason is that
	 legacy_nonfn_member_lookup searches the list in order, and we
	 want a field name to override a type name so that the "struct
	 stat hack" will work.  In particular:

	   struct S { enum E { }; static const int E = 5; int ary[S::E]; } s;

	 is valid.  */
      if (TREE_CODE (decl) == TYPE_DECL)
	TYPE_FIELDS (current_class_type)
	  = chainon (TYPE_FIELDS (current_class_type), decl);
      else
	{
	  DECL_CHAIN (decl) = TYPE_FIELDS (current_class_type);
	  TYPE_FIELDS (current_class_type) = decl;
	}

      maybe_add_class_template_decl_list (current_class_type, decl,
					  /*friend_p=*/0);
    }
}

/* Finish processing a complete template declaration.  The PARMS are
   the template parameters.  A NULL PARMS means this was an explicit
   specialization.  */

void
finish_template_decl (tree parms)
{
  if (parms)
    end_template_decl ();
  else
    end_specialization ();
}

// Returns the template type of the class scope being entered. If we're
// entering a constrained class scope. TYPE is the class template
// scope being entered and we may need to match the intended type with
// a constrained specialization. For example:
//
//    template<Object T>
//      struct S { void f(); }; #1
//
//    template<Object T>
//      void S<T>::f() { }      #2
//
// We check, in #2, that S<T> refers precisely to the type declared by
// #1 (i.e., that the constraints match). Note that the following should
// be an error since there is no specialization of S<T> that is
// unconstrained, but this is not diagnosed here.
//
//    template<typename T>
//      void S<T>::f() { }
//
// We cannot diagnose this problem here since this function also matches
// qualified template names that are not part of a definition. For example:
//
//    template<Integral T, Floating_point U>
//      typename pair<T, U>::first_type void f(T, U);
//
// Here, it is unlikely that there is a partial specialization of
// pair constrained for Integral and Floating_point arguments.
//
// The general rule is: if a constrained specialization with matching
// constraints is found return that type. Also note that if TYPE is not a
// class-type (e.g. a typename type), then no fixup is needed.

static tree
fixup_template_type (tree type)
{
  // Find the template parameter list at a depth appropriate to
  // the scope we're trying to enter.
  tree parms = current_template_parms;
  int depth = template_class_depth (type);
  for (int n = processing_template_decl; n > depth && parms; --n)
    parms = TREE_CHAIN (parms);
  if (!parms)
    return type;
  tree cur_reqs = TEMPLATE_PARMS_CONSTRAINTS (parms);
  tree cur_constr = build_constraints (cur_reqs, NULL_TREE);

  // Search for a specialization whose type and constraints match.
  tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
  tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl);
  while (specs)
    {
      tree spec_constr = get_constraints (TREE_VALUE (specs));

      // If the type and constraints match a specialization, then we
      // are entering that type.
      if (same_type_p (type, TREE_TYPE (specs))
	  && equivalent_constraints (cur_constr, spec_constr))
	return TREE_TYPE (specs);
      specs = TREE_CHAIN (specs);
    }

  // If no specialization matches, then must return the type
  // previously found.
  return type;
}

/* Finish processing a template-id (which names a type) of the form
   NAME < ARGS >.  Return the TYPE_DECL for the type named by the
   template-id.  If ENTERING_SCOPE is nonzero we are about to enter
   the scope of template-id indicated.  */

tree
finish_template_type (tree name, tree args, int entering_scope)
{
  tree type;

  type = lookup_template_class (name, args,
				NULL_TREE, NULL_TREE, entering_scope,
				tf_warning_or_error | tf_user);

  /* If we might be entering the scope of a partial specialization,
     find the one with the right constraints.  */
  if (flag_concepts
      && entering_scope
      && CLASS_TYPE_P (type)
      && CLASSTYPE_TEMPLATE_INFO (type)
      && dependent_type_p (type)
      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type)))
    type = fixup_template_type (type);

  if (type == error_mark_node)
    return type;
  else if (CLASS_TYPE_P (type) && !alias_type_or_template_p (type))
    return TYPE_STUB_DECL (type);
  else
    return TYPE_NAME (type);
}

/* Finish processing a BASE_CLASS with the indicated ACCESS_SPECIFIER.
   Return a TREE_LIST containing the ACCESS_SPECIFIER and the
   BASE_CLASS, or NULL_TREE if an error occurred.  The
   ACCESS_SPECIFIER is one of
   access_{default,public,protected_private}_node.  For a virtual base
   we set TREE_TYPE.  */

tree
finish_base_specifier (tree base, tree access, bool virtual_p)
{
  tree result;

  if (base == error_mark_node)
    {
      error ("invalid base-class specification");
      result = NULL_TREE;
    }
  else if (! MAYBE_CLASS_TYPE_P (base))
    {
      error ("%qT is not a class type", base);
      result = NULL_TREE;
    }
  else
    {
      if (cp_type_quals (base) != 0)
	{
	  /* DR 484: Can a base-specifier name a cv-qualified
	     class type?  */
	  base = TYPE_MAIN_VARIANT (base);
	}
      result = build_tree_list (access, base);
      if (virtual_p)
	/* Non-NULL TREE_TYPE marks the base as virtual.  */
	TREE_TYPE (result) = integer_type_node;
    }

  return result;
}

/* If FNS is a member function, a set of member functions, or a
   template-id referring to one or more member functions, return a
   BASELINK for FNS, incorporating the current access context.
   Otherwise, return FNS unchanged.  */

tree
baselink_for_fns (tree fns)
{
  tree scope;
  tree cl;

  if (BASELINK_P (fns)
      || error_operand_p (fns))
    return fns;

  scope = ovl_scope (fns);
  if (!CLASS_TYPE_P (scope))
    /* Not member functions; nothing to wrap.  */
    return fns;

  /* Prefer the currently-open derived class as the access context.  */
  cl = currently_open_derived_class (scope);
  if (!cl)
    cl = scope;
  cl = TYPE_BINFO (cl);
  return build_baselink (cl, cl, fns, /*optype=*/NULL_TREE);
}

/* Returns true iff DECL is a variable from a function outside
   the current one.  */

static bool
outer_var_p (tree decl)
{
  return ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL)
	  && DECL_FUNCTION_SCOPE_P (decl)
	  /* Don't get confused by temporaries.  */
	  && DECL_NAME (decl)
	  && (DECL_CONTEXT (decl) != current_function_decl
	      || parsing_nsdmi ()));
}

/* As above, but also checks that DECL is automatic.  */

bool
outer_automatic_var_p (tree decl)
{
  return (outer_var_p (decl)
	  && !TREE_STATIC (decl));
}

/* DECL satisfies outer_automatic_var_p.  Possibly complain about it or
   rewrite it for lambda capture.

   If ODR_USE is true, we're being called from mark_use, and we complain
   about use of constant variables.  If ODR_USE is false, we're being
   called for the id-expression, and we do lambda capture.
*/ tree process_outer_var_ref (tree decl, tsubst_flags_t complain, bool odr_use) { if (cp_unevaluated_operand) { tree type = TREE_TYPE (decl); if (!dependent_type_p (type) && variably_modified_type_p (type, NULL_TREE)) /* VLAs are used even in unevaluated context. */; else /* It's not a use (3.2) if we're in an unevaluated context. */ return decl; } if (decl == error_mark_node) return decl; tree context = DECL_CONTEXT (decl); tree containing_function = current_function_decl; tree lambda_stack = NULL_TREE; tree lambda_expr = NULL_TREE; tree initializer = convert_from_reference (decl); /* Mark it as used now even if the use is ill-formed. */ if (!mark_used (decl, complain)) return error_mark_node; if (parsing_nsdmi ()) containing_function = NULL_TREE; if (containing_function && LAMBDA_FUNCTION_P (containing_function)) { /* Check whether we've already built a proxy. */ tree var = decl; while (is_normal_capture_proxy (var)) var = DECL_CAPTURED_VARIABLE (var); tree d = retrieve_local_specialization (var); if (d && d != decl && is_capture_proxy (d)) { if (DECL_CONTEXT (d) == containing_function) /* We already have an inner proxy. */ return d; else /* We need to capture an outer proxy. */ return process_outer_var_ref (d, complain, odr_use); } } /* If we are in a lambda function, we can move out until we hit 1. the context, 2. a non-lambda function, or 3. a non-default capturing lambda function. */ while (context != containing_function /* containing_function can be null with invalid generic lambdas. */ && containing_function && LAMBDA_FUNCTION_P (containing_function)) { tree closure = DECL_CONTEXT (containing_function); lambda_expr = CLASSTYPE_LAMBDA_EXPR (closure); if (TYPE_CLASS_SCOPE_P (closure)) /* A lambda in an NSDMI (c++/64496). 
*/ break; if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) break; lambda_stack = tree_cons (NULL_TREE, lambda_expr, lambda_stack); containing_function = decl_function_context (containing_function); } /* In a lambda within a template, wait until instantiation time to implicitly capture a parameter pack. We want to wait because we don't know if we're capturing the whole pack or a single element, and it's OK to wait because find_parameter_packs_r walks into the lambda body. */ if (context == containing_function && DECL_PACK_P (decl)) return decl; if (lambda_expr && VAR_P (decl) && DECL_ANON_UNION_VAR_P (decl)) { if (complain & tf_error) error ("cannot capture member %qD of anonymous union", decl); return error_mark_node; } /* Do lambda capture when processing the id-expression, not when odr-using a variable. */ if (!odr_use && context == containing_function) decl = add_default_capture (lambda_stack, /*id=*/DECL_NAME (decl), initializer); /* Only an odr-use of an outer automatic variable causes an error, and a constant variable can decay to a prvalue constant without odr-use. So don't complain yet. */ else if (!odr_use && decl_constant_var_p (decl)) return decl; else if (lambda_expr) { if (complain & tf_error) { error ("%qD is not captured", decl); tree closure = LAMBDA_EXPR_CLOSURE (lambda_expr); if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) inform (location_of (closure), "the lambda has no capture-default"); else if (TYPE_CLASS_SCOPE_P (closure)) inform (UNKNOWN_LOCATION, "lambda in local class %q+T cannot " "capture variables from the enclosing context", TYPE_CONTEXT (closure)); inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl); } return error_mark_node; } else { if (complain & tf_error) { error (VAR_P (decl) ? 
G_("use of local variable with automatic storage from " "containing function") : G_("use of parameter from containing function")); inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl); } return error_mark_node; } return decl; } /* ID_EXPRESSION is a representation of parsed, but unprocessed, id-expression. (See cp_parser_id_expression for details.) SCOPE, if non-NULL, is the type or namespace used to explicitly qualify ID_EXPRESSION. DECL is the entity to which that name has been resolved. *CONSTANT_EXPRESSION_P is true if we are presently parsing a constant-expression. In that case, *NON_CONSTANT_EXPRESSION_P will be set to true if this expression isn't permitted in a constant-expression, but it is otherwise not set by this function. *ALLOW_NON_CONSTANT_EXPRESSION_P is true if we are parsing a constant-expression, but a non-constant expression is also permissible. DONE is true if this expression is a complete postfix-expression; it is false if this expression is followed by '->', '[', '(', etc. ADDRESS_P is true iff this expression is the operand of '&'. TEMPLATE_P is true iff the qualified-id was of the form "A::template B". TEMPLATE_ARG_P is true iff this qualified name appears as a template argument. If an error occurs, and it is the kind of error that might cause the parser to abort a tentative parse, *ERROR_MSG is filled in. It is the caller's responsibility to issue the message. *ERROR_MSG will be a string with static storage duration, so the caller need not "free" it. Return an expression for the entity, after issuing appropriate diagnostics. This function is also responsible for transforming a reference to a non-static member into a COMPONENT_REF that makes the use of "this" explicit. Upon return, *IDK will be filled in appropriately. 
*/ static cp_expr finish_id_expression_1 (tree id_expression, tree decl, tree scope, cp_id_kind *idk, bool integral_constant_expression_p, bool allow_non_integral_constant_expression_p, bool *non_integral_constant_expression_p, bool template_p, bool done, bool address_p, bool template_arg_p, const char **error_msg, location_t location) { decl = strip_using_decl (decl); /* Initialize the output parameters. */ *idk = CP_ID_KIND_NONE; *error_msg = NULL; if (id_expression == error_mark_node) return error_mark_node; /* If we have a template-id, then no further lookup is required. If the template-id was for a template-class, we will sometimes have a TYPE_DECL at this point. */ else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR || TREE_CODE (decl) == TYPE_DECL) ; /* Look up the name. */ else { if (decl == error_mark_node) { /* Name lookup failed. */ if (scope && (!TYPE_P (scope) || (!dependent_type_p (scope) && !(identifier_p (id_expression) && IDENTIFIER_CONV_OP_P (id_expression) && dependent_type_p (TREE_TYPE (id_expression)))))) { /* If the qualifying type is non-dependent (and the name does not name a conversion operator to a dependent type), issue an error. */ qualified_name_lookup_error (scope, id_expression, decl, location); return error_mark_node; } else if (!scope) { /* It may be resolved via Koenig lookup. */ *idk = CP_ID_KIND_UNQUALIFIED; return id_expression; } else decl = id_expression; } /* Remember that the name was used in the definition of the current class so that we can check later to see if the meaning would have been different after the class was entirely defined. */ if (!scope && decl != error_mark_node && identifier_p (id_expression)) maybe_note_name_used_in_class (id_expression, decl); /* A use in unevaluated operand might not be instantiated appropriately if tsubst_copy builds a dummy parm, or if we never instantiate a generic lambda, so mark it now. 
*/ if (processing_template_decl && cp_unevaluated_operand) mark_type_use (decl); /* Disallow uses of local variables from containing functions, except within lambda-expressions. */ if (outer_automatic_var_p (decl)) { decl = process_outer_var_ref (decl, tf_warning_or_error); if (decl == error_mark_node) return error_mark_node; } /* Also disallow uses of function parameters outside the function body, except inside an unevaluated context (i.e. decltype). */ if (TREE_CODE (decl) == PARM_DECL && DECL_CONTEXT (decl) == NULL_TREE && !cp_unevaluated_operand) { *error_msg = G_("use of parameter outside function body"); return error_mark_node; } } /* If we didn't find anything, or what we found was a type, then this wasn't really an id-expression. */ if (TREE_CODE (decl) == TEMPLATE_DECL && !DECL_FUNCTION_TEMPLATE_P (decl)) { *error_msg = G_("missing template arguments"); return error_mark_node; } else if (TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == NAMESPACE_DECL) { *error_msg = G_("expected primary-expression"); return error_mark_node; } /* If the name resolved to a template parameter, there is no need to look it up again later. */ if ((TREE_CODE (decl) == CONST_DECL && DECL_TEMPLATE_PARM_P (decl)) || TREE_CODE (decl) == TEMPLATE_PARM_INDEX) { tree r; *idk = CP_ID_KIND_NONE; if (TREE_CODE (decl) == TEMPLATE_PARM_INDEX) decl = TEMPLATE_PARM_DECL (decl); r = DECL_INITIAL (decl); if (CLASS_TYPE_P (TREE_TYPE (r)) && !CP_TYPE_CONST_P (TREE_TYPE (r))) { /* If the entity is a template parameter object for a template parameter of type T, the type of the expression is const T. 
*/ tree ctype = TREE_TYPE (r); ctype = cp_build_qualified_type (ctype, (cp_type_quals (ctype) | TYPE_QUAL_CONST)); r = build1 (VIEW_CONVERT_EXPR, ctype, r); } r = convert_from_reference (r); if (integral_constant_expression_p && !dependent_type_p (TREE_TYPE (decl)) && !(INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (r)))) { if (!allow_non_integral_constant_expression_p) error ("template parameter %qD of type %qT is not allowed in " "an integral constant expression because it is not of " "integral or enumeration type", decl, TREE_TYPE (decl)); *non_integral_constant_expression_p = true; } return r; } else { bool dependent_p = type_dependent_expression_p (decl); /* If the declaration was explicitly qualified indicate that. The semantics of `A::f(3)' are different than `f(3)' if `f' is virtual. */ *idk = (scope ? CP_ID_KIND_QUALIFIED : (TREE_CODE (decl) == TEMPLATE_ID_EXPR ? CP_ID_KIND_TEMPLATE_ID : (dependent_p ? CP_ID_KIND_UNQUALIFIED_DEPENDENT : CP_ID_KIND_UNQUALIFIED))); if (dependent_p && DECL_P (decl) && any_dependent_type_attributes_p (DECL_ATTRIBUTES (decl))) /* Dependent type attributes on the decl mean that the TREE_TYPE is wrong, so just return the identifier. */ return id_expression; if (DECL_CLASS_TEMPLATE_P (decl)) { error ("use of class template %qT as expression", decl); return error_mark_node; } if (TREE_CODE (decl) == TREE_LIST) { /* Ambiguous reference to base members. */ error ("request for member %qD is ambiguous in " "multiple inheritance lattice", id_expression); print_candidates (decl); return error_mark_node; } /* Mark variable-like entities as used. Functions are similarly marked either below or after overload resolution. */ if ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == CONST_DECL || TREE_CODE (decl) == RESULT_DECL) && !mark_used (decl)) return error_mark_node; /* Only certain kinds of names are allowed in constant expression. Template parameters have already been handled above. */ if (! 
error_operand_p (decl) && !dependent_p && integral_constant_expression_p && !decl_constant_var_p (decl) && TREE_CODE (decl) != CONST_DECL && !builtin_valid_in_constant_expr_p (decl) && !concept_check_p (decl)) { if (!allow_non_integral_constant_expression_p) { error ("%qD cannot appear in a constant-expression", decl); return error_mark_node; } *non_integral_constant_expression_p = true; } if (tree wrap = maybe_get_tls_wrapper_call (decl)) /* Replace an evaluated use of the thread_local variable with a call to its wrapper. */ decl = wrap; else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR && !dependent_p && variable_template_p (TREE_OPERAND (decl, 0)) && !concept_check_p (decl)) { decl = finish_template_variable (decl); mark_used (decl); decl = convert_from_reference (decl); } else if (concept_check_p (decl)) { /* Nothing more to do. All of the analysis for concept checks is done by build_conept_id, called from the parser. */ } else if (scope) { if (TREE_CODE (decl) == SCOPE_REF) { gcc_assert (same_type_p (scope, TREE_OPERAND (decl, 0))); decl = TREE_OPERAND (decl, 1); } decl = (adjust_result_of_qualified_name_lookup (decl, scope, current_nonlambda_class_type())); if (TREE_CODE (decl) == FUNCTION_DECL) mark_used (decl); cp_warn_deprecated_use_scopes (scope); if (TYPE_P (scope)) decl = finish_qualified_id_expr (scope, decl, done, address_p, template_p, template_arg_p, tf_warning_or_error); else decl = convert_from_reference (decl); } else if (TREE_CODE (decl) == FIELD_DECL) { /* Since SCOPE is NULL here, this is an unqualified name. Access checking has been performed during name lookup already. Turn off checking to avoid duplicate errors. 
*/ push_deferring_access_checks (dk_no_check); decl = finish_non_static_data_member (decl, NULL_TREE, /*qualifying_scope=*/NULL_TREE); pop_deferring_access_checks (); } else if (is_overloaded_fn (decl)) { /* We only need to look at the first function, because all the fns share the attribute we're concerned with (all member fns or all non-members). */ tree first_fn = get_first_fn (decl); first_fn = STRIP_TEMPLATE (first_fn); /* [basic.def.odr]: "A function whose name appears as a potentially-evaluated expression is odr-used if it is the unique lookup result". But only mark it if it's a complete postfix-expression; in a call, ADL might select a different function, and we'll call mark_used in build_over_call. */ if (done && !really_overloaded_fn (decl) && !mark_used (first_fn)) return error_mark_node; if (!template_arg_p && (TREE_CODE (first_fn) == USING_DECL || (TREE_CODE (first_fn) == FUNCTION_DECL && DECL_FUNCTION_MEMBER_P (first_fn) && !shared_member_p (decl)))) { /* A set of member functions. */ decl = maybe_dummy_object (DECL_CONTEXT (first_fn), 0); return finish_class_member_access_expr (decl, id_expression, /*template_p=*/false, tf_warning_or_error); } decl = baselink_for_fns (decl); } else { if (DECL_P (decl) && DECL_NONLOCAL (decl) && DECL_CLASS_SCOPE_P (decl)) { tree context = context_for_name_lookup (decl); if (context != current_class_type) { tree path = currently_open_derived_class (context); perform_or_defer_access_check (TYPE_BINFO (path), decl, decl, tf_warning_or_error); } } decl = convert_from_reference (decl); } } return cp_expr (decl, location); } /* As per finish_id_expression_1, but adding a wrapper node around the result if needed to express LOCATION. 
*/

/* Thin public wrapper around finish_id_expression_1: forwards all
   arguments unchanged and, if needed, wraps the result in a location
   wrapper node so LOCATION is preserved.  */

cp_expr
finish_id_expression (tree id_expression,
		      tree decl,
		      tree scope,
		      cp_id_kind *idk,
		      bool integral_constant_expression_p,
		      bool allow_non_integral_constant_expression_p,
		      bool *non_integral_constant_expression_p,
		      bool template_p,
		      bool done,
		      bool address_p,
		      bool template_arg_p,
		      const char **error_msg,
		      location_t location)
{
  cp_expr result
    = finish_id_expression_1 (id_expression, decl, scope, idk,
			      integral_constant_expression_p,
			      allow_non_integral_constant_expression_p,
			      non_integral_constant_expression_p,
			      template_p, done, address_p, template_arg_p,
			      error_msg, location);
  return result.maybe_add_location_wrapper ();
}

/* Implement the __typeof keyword: Return the type of EXPR, suitable for
   use as a type-specifier.  */

tree
finish_typeof (tree expr)
{
  tree type;

  if (type_dependent_expression_p (expr))
    {
      /* Can't compute the type yet; build a TYPEOF_TYPE node to be
	 resolved at instantiation time.  */
      type = cxx_make_type (TYPEOF_TYPE);
      TYPEOF_TYPE_EXPR (type) = expr;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }

  expr = mark_type_use (expr);

  type = unlowered_expr_type (expr);

  if (!type || type == unknown_type_node)
    {
      /* E.g. the name of an overload set — no unique type.  */
      error ("type of %qE is unknown", expr);
      return error_mark_node;
    }

  return type;
}

/* Implement the __underlying_type keyword: Return the underlying type
   of TYPE, suitable for use as a type-specifier.  */

tree
finish_underlying_type (tree type)
{
  tree underlying_type;

  if (processing_template_decl)
    {
      /* Defer to instantiation time via an UNDERLYING_TYPE node.  */
      underlying_type = cxx_make_type (UNDERLYING_TYPE);
      UNDERLYING_TYPE_TYPE (underlying_type) = type;
      SET_TYPE_STRUCTURAL_EQUALITY (underlying_type);

      return underlying_type;
    }

  if (!complete_type_or_else (type, NULL_TREE))
    return error_mark_node;

  if (TREE_CODE (type) != ENUMERAL_TYPE)
    {
      error ("%qT is not an enumeration type", type);
      return error_mark_node;
    }

  underlying_type = ENUM_UNDERLYING_TYPE (type);

  /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE
     includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information.
     See finish_enum_value_list for details.
*/ if (!ENUM_FIXED_UNDERLYING_TYPE_P (type)) underlying_type = c_common_type_for_mode (TYPE_MODE (underlying_type), TYPE_UNSIGNED (underlying_type)); return underlying_type; } /* Implement the __direct_bases keyword: Return the direct base classes of type. */ tree calculate_direct_bases (tree type, tsubst_flags_t complain) { if (!complete_type_or_maybe_complain (type, NULL_TREE, complain) || !NON_UNION_CLASS_TYPE_P (type)) return make_tree_vec (0); releasing_vec vector; vec<tree, va_gc> *base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type)); tree binfo; unsigned i; /* Virtual bases are initialized first */ for (i = 0; base_binfos->iterate (i, &binfo); i++) if (BINFO_VIRTUAL_P (binfo)) vec_safe_push (vector, binfo); /* Now non-virtuals */ for (i = 0; base_binfos->iterate (i, &binfo); i++) if (!BINFO_VIRTUAL_P (binfo)) vec_safe_push (vector, binfo); tree bases_vec = make_tree_vec (vector->length ()); for (i = 0; i < vector->length (); ++i) TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE ((*vector)[i]); return bases_vec; } /* Implement the __bases keyword: Return the base classes of type */ /* Find morally non-virtual base classes by walking binfo hierarchy */ /* Virtual base classes are handled separately in finish_bases */ static tree dfs_calculate_bases_pre (tree binfo, void * /*data_*/) { /* Don't walk bases of virtual bases */ return BINFO_VIRTUAL_P (binfo) ? 
    dfs_skip_bases : NULL_TREE;
}

/* Post-order callback: collect the type of each non-virtual binfo into
   the vector passed via DATA_.  Post-order gives construction order.  */

static tree
dfs_calculate_bases_post (tree binfo, void *data_)
{
  vec<tree, va_gc> **data = ((vec<tree, va_gc> **) data_);
  if (!BINFO_VIRTUAL_P (binfo))
    vec_safe_push (*data, BINFO_TYPE (binfo));
  return NULL_TREE;
}

/* Calculates the morally non-virtual base classes of a class.
   Returns a freshly obtained vector (caller releases it).  */
static vec<tree, va_gc> *
calculate_bases_helper (tree type)
{
  vec<tree, va_gc> *vector = make_tree_vector ();

  /* Now add non-virtual base classes in order of construction.  */
  if (TYPE_BINFO (type))
    dfs_walk_all (TYPE_BINFO (type),
		  dfs_calculate_bases_pre, dfs_calculate_bases_post, &vector);
  return vector;
}

/* Implement the __bases keyword: return all base classes of TYPE as a
   TREE_VEC, virtual bases first (in order of construction).  */

tree
calculate_bases (tree type, tsubst_flags_t complain)
{
  if (!complete_type_or_maybe_complain (type, NULL_TREE, complain)
      || !NON_UNION_CLASS_TYPE_P (type))
    return make_tree_vec (0);

  /* releasing_vec frees its vector automatically at scope exit.  */
  releasing_vec vector;
  tree bases_vec = NULL_TREE;
  unsigned i;
  vec<tree, va_gc> *vbases;
  tree binfo;

  /* First go through virtual base classes.  */
  for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0;
       vec_safe_iterate (vbases, i, &binfo); i++)
    {
      /* Each virtual base contributes its own morally non-virtual
	 bases, then itself (as the last element of the helper's
	 post-order walk).  */
      releasing_vec vbase_bases
	= calculate_bases_helper (BINFO_TYPE (binfo));
      vec_safe_splice (vector, vbase_bases);
    }

  /* Now for the non-virtual bases.  */
  releasing_vec nonvbases = calculate_bases_helper (type);
  vec_safe_splice (vector, nonvbases);

  /* Note that during error recovery vector->length can even be zero.
*/ if (vector->length () > 1) { /* Last element is entire class, so don't copy */ bases_vec = make_tree_vec (vector->length () - 1); for (i = 0; i < vector->length () - 1; ++i) TREE_VEC_ELT (bases_vec, i) = (*vector)[i]; } else bases_vec = make_tree_vec (0); return bases_vec; } tree finish_bases (tree type, bool direct) { tree bases = NULL_TREE; if (!processing_template_decl) { /* Parameter packs can only be used in templates */ error ("parameter pack %<__bases%> only valid in template declaration"); return error_mark_node; } bases = cxx_make_type (BASES); BASES_TYPE (bases) = type; BASES_DIRECT (bases) = direct; SET_TYPE_STRUCTURAL_EQUALITY (bases); return bases; } /* Perform C++-specific checks for __builtin_offsetof before calling fold_offsetof. */ tree finish_offsetof (tree object_ptr, tree expr, location_t loc) { /* If we're processing a template, we can't finish the semantics yet. Otherwise we can fold the entire expression now. */ if (processing_template_decl) { expr = build2 (OFFSETOF_EXPR, size_type_node, expr, object_ptr); SET_EXPR_LOCATION (expr, loc); return expr; } if (expr == error_mark_node) return error_mark_node; if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR) { error ("cannot apply %<offsetof%> to destructor %<~%T%>", TREE_OPERAND (expr, 2)); return error_mark_node; } if (FUNC_OR_METHOD_TYPE_P (TREE_TYPE (expr)) || TREE_TYPE (expr) == unknown_type_node) { while (TREE_CODE (expr) == COMPONENT_REF || TREE_CODE (expr) == COMPOUND_EXPR) expr = TREE_OPERAND (expr, 1); if (DECL_P (expr)) { error ("cannot apply %<offsetof%> to member function %qD", expr); inform (DECL_SOURCE_LOCATION (expr), "declared here"); } else error ("cannot apply %<offsetof%> to member function"); return error_mark_node; } if (TREE_CODE (expr) == CONST_DECL) { error ("cannot apply %<offsetof%> to an enumerator %qD", expr); return error_mark_node; } if (REFERENCE_REF_P (expr)) expr = TREE_OPERAND (expr, 0); if (!complete_type_or_else (TREE_TYPE (TREE_TYPE (object_ptr)), object_ptr)) 
return error_mark_node; if (warn_invalid_offsetof && CLASS_TYPE_P (TREE_TYPE (TREE_TYPE (object_ptr))) && CLASSTYPE_NON_STD_LAYOUT (TREE_TYPE (TREE_TYPE (object_ptr))) && cp_unevaluated_operand == 0) warning_at (loc, OPT_Winvalid_offsetof, "%<offsetof%> within " "non-standard-layout type %qT is conditionally-supported", TREE_TYPE (TREE_TYPE (object_ptr))); return fold_offsetof (expr); } /* Replace the AGGR_INIT_EXPR at *TP with an equivalent CALL_EXPR. This function is broken out from the above for the benefit of the tree-ssa project. */ void simplify_aggr_init_expr (tree *tp) { tree aggr_init_expr = *tp; /* Form an appropriate CALL_EXPR. */ tree fn = AGGR_INIT_EXPR_FN (aggr_init_expr); tree slot = AGGR_INIT_EXPR_SLOT (aggr_init_expr); tree type = TREE_TYPE (slot); tree call_expr; enum style_t { ctor, arg, pcc } style; if (AGGR_INIT_VIA_CTOR_P (aggr_init_expr)) style = ctor; #ifdef PCC_STATIC_STRUCT_RETURN else if (1) style = pcc; #endif else { gcc_assert (TREE_ADDRESSABLE (type)); style = arg; } call_expr = build_call_array_loc (input_location, TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))), fn, aggr_init_expr_nargs (aggr_init_expr), AGGR_INIT_EXPR_ARGP (aggr_init_expr)); TREE_NOTHROW (call_expr) = TREE_NOTHROW (aggr_init_expr); CALL_FROM_THUNK_P (call_expr) = AGGR_INIT_FROM_THUNK_P (aggr_init_expr); CALL_EXPR_OPERATOR_SYNTAX (call_expr) = CALL_EXPR_OPERATOR_SYNTAX (aggr_init_expr); CALL_EXPR_ORDERED_ARGS (call_expr) = CALL_EXPR_ORDERED_ARGS (aggr_init_expr); CALL_EXPR_REVERSE_ARGS (call_expr) = CALL_EXPR_REVERSE_ARGS (aggr_init_expr); if (style == ctor) { /* Replace the first argument to the ctor with the address of the slot. */ cxx_mark_addressable (slot); CALL_EXPR_ARG (call_expr, 0) = build1 (ADDR_EXPR, build_pointer_type (type), slot); } else if (style == arg) { /* Just mark it addressable here, and leave the rest to expand_call{,_inline}. 
*/ cxx_mark_addressable (slot); CALL_EXPR_RETURN_SLOT_OPT (call_expr) = true; call_expr = build2 (INIT_EXPR, TREE_TYPE (call_expr), slot, call_expr); } else if (style == pcc) { /* If we're using the non-reentrant PCC calling convention, then we need to copy the returned value out of the static buffer into the SLOT. */ push_deferring_access_checks (dk_no_check); call_expr = build_aggr_init (slot, call_expr, DIRECT_BIND | LOOKUP_ONLYCONVERTING, tf_warning_or_error); pop_deferring_access_checks (); call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (slot), call_expr, slot); } if (AGGR_INIT_ZERO_FIRST (aggr_init_expr)) { tree init = build_zero_init (type, NULL_TREE, /*static_storage_p=*/false); init = build2 (INIT_EXPR, void_type_node, slot, init); call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (call_expr), init, call_expr); } *tp = call_expr; } /* Emit all thunks to FN that should be emitted when FN is emitted. */ void emit_associated_thunks (tree fn) { /* When we use vcall offsets, we emit thunks with the virtual functions to which they thunk. The whole point of vcall offsets is so that you can know statically the entire set of thunks that will ever be needed for a given virtual function, thereby enabling you to output all the thunks with the function itself. */ if (DECL_VIRTUAL_P (fn) /* Do not emit thunks for extern template instantiations. */ && ! DECL_REALLY_EXTERN (fn)) { tree thunk; for (thunk = DECL_THUNKS (fn); thunk; thunk = DECL_CHAIN (thunk)) { if (!THUNK_ALIAS (thunk)) { use_thunk (thunk, /*emit_p=*/1); if (DECL_RESULT_THUNK_P (thunk)) { tree probe; for (probe = DECL_THUNKS (thunk); probe; probe = DECL_CHAIN (probe)) use_thunk (probe, /*emit_p=*/1); } } else gcc_assert (!DECL_THUNKS (thunk)); } } } /* Generate RTL for FN. */ bool expand_or_defer_fn_1 (tree fn) { /* When the parser calls us after finishing the body of a template function, we don't really want to expand the body. 
*/ if (processing_template_decl) { /* Normally, collection only occurs in rest_of_compilation. So, if we don't collect here, we never collect junk generated during the processing of templates until we hit a non-template function. It's not safe to do this inside a nested class, though, as the parser may have local state that is not a GC root. */ if (!function_depth) ggc_collect (); return false; } gcc_assert (DECL_SAVED_TREE (fn)); /* We make a decision about linkage for these functions at the end of the compilation. Until that point, we do not want the back end to output them -- but we do want it to see the bodies of these functions so that it can inline them as appropriate. */ if (DECL_DECLARED_INLINE_P (fn) || DECL_IMPLICIT_INSTANTIATION (fn)) { if (DECL_INTERFACE_KNOWN (fn)) /* We've already made a decision as to how this function will be handled. */; else if (!at_eof || DECL_IMMEDIATE_FUNCTION_P (fn) || DECL_OMP_DECLARE_REDUCTION_P (fn)) tentative_decl_linkage (fn); else import_export_decl (fn); /* If the user wants us to keep all inline functions, then mark this function as needed so that finish_file will make sure to output it later. Similarly, all dllexport'd functions must be emitted; there may be callers in other DLLs. */ if (DECL_DECLARED_INLINE_P (fn) && !DECL_REALLY_EXTERN (fn) && !DECL_IMMEDIATE_FUNCTION_P (fn) && !DECL_OMP_DECLARE_REDUCTION_P (fn) && (flag_keep_inline_functions || (flag_keep_inline_dllexport && lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn))))) { mark_needed (fn); DECL_EXTERNAL (fn) = 0; } } /* If this is a constructor or destructor body, we have to clone it. */ if (maybe_clone_body (fn)) { /* We don't want to process FN again, so pretend we've written it out, even though we haven't. */ TREE_ASM_WRITTEN (fn) = 1; /* If this is a constexpr function, keep DECL_SAVED_TREE. 
*/ if (!DECL_DECLARED_CONSTEXPR_P (fn)) DECL_SAVED_TREE (fn) = NULL_TREE; return false; } /* There's no reason to do any of the work here if we're only doing semantic analysis; this code just generates RTL. */ if (flag_syntax_only) { /* Pretend that this function has been written out so that we don't try to expand it again. */ TREE_ASM_WRITTEN (fn) = 1; return false; } if (DECL_OMP_DECLARE_REDUCTION_P (fn)) return false; return true; } void expand_or_defer_fn (tree fn) { if (expand_or_defer_fn_1 (fn)) { function_depth++; /* Expand or defer, at the whim of the compilation unit manager. */ cgraph_node::finalize_function (fn, function_depth > 1); emit_associated_thunks (fn); function_depth--; } } class nrv_data { public: nrv_data () : visited (37) {} tree var; tree result; hash_table<nofree_ptr_hash <tree_node> > visited; }; /* Helper function for walk_tree, used by finalize_nrv below. */ static tree finalize_nrv_r (tree* tp, int* walk_subtrees, void* data) { class nrv_data *dp = (class nrv_data *)data; tree_node **slot; /* No need to walk into types. There wouldn't be any need to walk into non-statements, except that we have to consider STMT_EXPRs. */ if (TYPE_P (*tp)) *walk_subtrees = 0; /* Change all returns to just refer to the RESULT_DECL; this is a nop, but differs from using NULL_TREE in that it indicates that we care about the value of the RESULT_DECL. */ else if (TREE_CODE (*tp) == RETURN_EXPR) TREE_OPERAND (*tp, 0) = dp->result; /* Change all cleanups for the NRV to only run when an exception is thrown. */ else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == dp->var) CLEANUP_EH_ONLY (*tp) = 1; /* Replace the DECL_EXPR for the NRV with an initialization of the RESULT_DECL, if needed. 
*/ else if (TREE_CODE (*tp) == DECL_EXPR && DECL_EXPR_DECL (*tp) == dp->var) { tree init; if (DECL_INITIAL (dp->var) && DECL_INITIAL (dp->var) != error_mark_node) init = build2 (INIT_EXPR, void_type_node, dp->result, DECL_INITIAL (dp->var)); else init = build_empty_stmt (EXPR_LOCATION (*tp)); DECL_INITIAL (dp->var) = NULL_TREE; SET_EXPR_LOCATION (init, EXPR_LOCATION (*tp)); *tp = init; } /* And replace all uses of the NRV with the RESULT_DECL. */ else if (*tp == dp->var) *tp = dp->result; /* Avoid walking into the same tree more than once. Unfortunately, we can't just use walk_tree_without duplicates because it would only call us for the first occurrence of dp->var in the function body. */ slot = dp->visited.find_slot (*tp, INSERT); if (*slot) *walk_subtrees = 0; else *slot = *tp; /* Keep iterating. */ return NULL_TREE; } /* Called from finish_function to implement the named return value optimization by overriding all the RETURN_EXPRs and pertinent CLEANUP_STMTs and replacing all occurrences of VAR with RESULT, the RESULT_DECL for the function. */ void finalize_nrv (tree *tp, tree var, tree result) { class nrv_data data; /* Copy name from VAR to RESULT. */ DECL_NAME (result) = DECL_NAME (var); /* Don't forget that we take its address. */ TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (var); /* Finally set DECL_VALUE_EXPR to avoid assigning a stack slot at -O0 for the original var and debug info uses RESULT location for VAR. */ SET_DECL_VALUE_EXPR (var, result); DECL_HAS_VALUE_EXPR_P (var) = 1; data.var = var; data.result = result; cp_walk_tree (tp, finalize_nrv_r, &data, 0); } /* Create CP_OMP_CLAUSE_INFO for clause C. Returns true if it is invalid. */ bool cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor, bool need_copy_ctor, bool need_copy_assignment, bool need_dtor) { int save_errorcount = errorcount; tree info, t; /* Always allocate 3 elements for simplicity. These are the function decls for the ctor, dtor, and assignment op. 
This layout is known to the three lang hooks, cxx_omp_clause_default_init, cxx_omp_clause_copy_init, and cxx_omp_clause_assign_op. */ info = make_tree_vec (3); CP_OMP_CLAUSE_INFO (c) = info; if (need_default_ctor || need_copy_ctor) { if (need_default_ctor) t = get_default_ctor (type); else t = get_copy_ctor (type, tf_warning_or_error); if (t && !trivial_fn_p (t)) TREE_VEC_ELT (info, 0) = t; } if (need_dtor && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)) TREE_VEC_ELT (info, 1) = get_dtor (type, tf_warning_or_error); if (need_copy_assignment) { t = get_copy_assign (type); if (t && !trivial_fn_p (t)) TREE_VEC_ELT (info, 2) = t; } return errorcount != save_errorcount; } /* If DECL is DECL_OMP_PRIVATIZED_MEMBER, return corresponding FIELD_DECL, otherwise return DECL itself. */ static tree omp_clause_decl_field (tree decl) { if (VAR_P (decl) && DECL_HAS_VALUE_EXPR_P (decl) && DECL_ARTIFICIAL (decl) && DECL_LANG_SPECIFIC (decl) && DECL_OMP_PRIVATIZED_MEMBER (decl)) { tree f = DECL_VALUE_EXPR (decl); if (INDIRECT_REF_P (f)) f = TREE_OPERAND (f, 0); if (TREE_CODE (f) == COMPONENT_REF) { f = TREE_OPERAND (f, 1); gcc_assert (TREE_CODE (f) == FIELD_DECL); return f; } } return NULL_TREE; } /* Adjust DECL if needed for printing using %qE. */ static tree omp_clause_printable_decl (tree decl) { tree t = omp_clause_decl_field (decl); if (t) return t; return decl; } /* For a FIELD_DECL F and corresponding DECL_OMP_PRIVATIZED_MEMBER VAR_DECL T that doesn't need a DECL_EXPR added, record it for privatization. */ static void omp_note_field_privatization (tree f, tree t) { if (!omp_private_member_map) omp_private_member_map = new hash_map<tree, tree>; tree &v = omp_private_member_map->get_or_insert (f); if (v == NULL_TREE) { v = t; omp_private_member_vec.safe_push (f); /* Signal that we don't want to create DECL_EXPR for this dummy var. */ omp_private_member_vec.safe_push (integer_zero_node); } } /* Privatize FIELD_DECL T, return corresponding DECL_OMP_PRIVATIZED_MEMBER dummy VAR_DECL. 
*/ tree omp_privatize_field (tree t, bool shared) { tree m = finish_non_static_data_member (t, NULL_TREE, NULL_TREE); if (m == error_mark_node) return error_mark_node; if (!omp_private_member_map && !shared) omp_private_member_map = new hash_map<tree, tree>; if (TYPE_REF_P (TREE_TYPE (t))) { gcc_assert (INDIRECT_REF_P (m)); m = TREE_OPERAND (m, 0); } tree vb = NULL_TREE; tree &v = shared ? vb : omp_private_member_map->get_or_insert (t); if (v == NULL_TREE) { v = create_temporary_var (TREE_TYPE (m)); retrofit_lang_decl (v); DECL_OMP_PRIVATIZED_MEMBER (v) = 1; SET_DECL_VALUE_EXPR (v, m); DECL_HAS_VALUE_EXPR_P (v) = 1; if (!shared) omp_private_member_vec.safe_push (t); } return v; } /* Helper function for handle_omp_array_sections. Called recursively to handle multiple array-section-subscripts. C is the clause, T current expression (initially OMP_CLAUSE_DECL), which is either a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound expression if specified, TREE_VALUE length expression if specified, TREE_CHAIN is what it has been specified after, or some decl. TYPES vector is populated with array section types, MAYBE_ZERO_LEN set to true if any of the array-section-subscript could have length of zero (explicit or implicit), FIRST_NON_ONE is the index of the first array-section-subscript which is known not to have length of one. Given say: map(a[:b][2:1][:c][:2][:d][e:f][2:5]) FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c] all are or may have length of 1, array-section-subscript [:2] is the first one known not to have length 1. For array-section-subscript <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't 0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we can if MAYBE_ZERO_LEN is false. MAYBE_ZERO_LEN will be true in the above case though, as some lengths could be zero. 
 */

static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
			     bool &maybe_zero_len, unsigned int &first_non_one,
			     enum c_omp_region_type ort)
{
  tree ret, low_bound, length, type;
  /* Base case of the recursion: T is the base decl/expression rather
     than a TREE_LIST array-section-subscript.  */
  if (TREE_CODE (t) != TREE_LIST)
    {
      if (error_operand_p (t))
	return error_mark_node;
      if (REFERENCE_REF_P (t)
	  && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF)
	t = TREE_OPERAND (t, 0);
      ret = t;
      if (TREE_CODE (t) == COMPONENT_REF
	  && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM)
	  && !type_dependent_expression_p (t))
	{
	  if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL
	      && DECL_BIT_FIELD (TREE_OPERAND (t, 1)))
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"bit-field %qE in %qs clause",
			t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	      return error_mark_node;
	    }
	  /* Walk down the chain of member accesses to the outermost
	     object, rejecting members of unions along the way.  */
	  while (TREE_CODE (t) == COMPONENT_REF)
	    {
	      if (TREE_TYPE (TREE_OPERAND (t, 0))
		  && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qE is a member of a union", t);
		  return error_mark_node;
		}
	      t = TREE_OPERAND (t, 0);
	      if (ort == C_ORT_ACC && TREE_CODE (t) == INDIRECT_REF)
		t = TREE_OPERAND (t, 0);
	    }
	  if (REFERENCE_REF_P (t))
	    t = TREE_OPERAND (t, 0);
	}
      if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL)
	{
	  if (processing_template_decl && TREE_CODE (t) != OVERLOAD)
	    return NULL_TREE;
	  if (DECL_P (t))
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qD is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  else
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qE is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      else if (ort == C_ORT_OMP
	       && TREE_CODE (t) == PARM_DECL
	       && DECL_ARTIFICIAL (t)
	       && DECL_NAME (t) == this_identifier)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%<this%> allowed in OpenMP only in %<declare simd%>"
		    " clauses");
	  return error_mark_node;
	}
      else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	       && VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD is threadprivate variable in %qs clause", t,
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (type_dependent_expression_p (ret))
	return NULL_TREE;
      ret = convert_from_reference (ret);
      return ret;
    }

  if (ort == C_ORT_OMP
      && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
      && TREE_CODE (TREE_CHAIN (t)) == FIELD_DECL)
    TREE_CHAIN (t) = omp_privatize_field (TREE_CHAIN (t), false);
  /* Recurse on the enclosing subscript (or the base) first; TYPES and
     FIRST_NON_ONE are filled in outside-in.  */
  ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
				     maybe_zero_len, first_non_one, ort);
  if (ret == error_mark_node || ret == NULL_TREE)
    return ret;

  type = TREE_TYPE (ret);
  low_bound = TREE_PURPOSE (t);
  length = TREE_VALUE (t);
  if ((low_bound && type_dependent_expression_p (low_bound))
      || (length && type_dependent_expression_p (length)))
    return NULL_TREE;

  if (low_bound == error_mark_node || length == error_mark_node)
    return error_mark_node;

  if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"low bound %qE of array section does not have integral type",
		low_bound);
      return error_mark_node;
    }
  if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"length %qE of array section does not have integral type",
		length);
      return error_mark_node;
    }
  if (low_bound)
    low_bound = mark_rvalue_use (low_bound);
  if (length)
    length = mark_rvalue_use (length);
  /* We need to reduce to real constant-values for checks below.  */
  if (length)
    length = fold_simple (length);
  if (low_bound)
    low_bound = fold_simple (low_bound);
  /* Normalize overly-wide integer constants to sizetype so the
     tree_int_cst_* comparisons below are well defined.  */
  if (low_bound
      && TREE_CODE (low_bound) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (low_bound))
	 > TYPE_PRECISION (sizetype))
    low_bound = fold_convert (sizetype, low_bound);
  if (length
      && TREE_CODE (length) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (length))
	 > TYPE_PRECISION (sizetype))
    length = fold_convert (sizetype, length);
  if (low_bound == NULL_TREE)
    low_bound = integer_zero_node;

  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
      && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
	  || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
    {
      /* attach/detach operate on a single pointer, so the section must
	 have length exactly one.  */
      if (length != integer_one_node)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "expected single pointer in %qs clause",
		    c_omp_map_clause_name (c, ort == C_ORT_ACC));
	  return error_mark_node;
	}
    }
  if (length != NULL_TREE)
    {
      if (!integer_nonzerop (length))
	{
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
	    {
	      /* Zero-length sections are hard errors for depend and the
		 reduction clauses.  */
	      if (integer_zerop (length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "zero length array section in %qs clause",
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	    }
	  else
	    maybe_zero_len = true;
	}
      /* Track the first subscript not provably of length 1 (see the
	 function comment for the FIRST_NON_ONE protocol).  */
      if (first_non_one == types.length ()
	  && (TREE_CODE (length) != INTEGER_CST || integer_onep (length)))
	first_non_one++;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (length == NULL_TREE
	  && (TYPE_DOMAIN (type) == NULL_TREE
	      || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for unknown bound array type length expression must "
		    "be specified");
	  return error_mark_node;
	}
      if (TREE_CODE (low_bound) == INTEGER_CST
	  && tree_int_cst_sgn (low_bound) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative low bound in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (TYPE_DOMAIN (type)
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
	     == INTEGER_CST)
	{
	  /* SIZE is the number of elements of the array, i.e. domain
	     max + 1.  */
	  tree size
	    = fold_convert (sizetype, TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
	  size = size_binop (PLUS_EXPR, size, size_one_node);
	  if (TREE_CODE (low_bound) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, low_bound))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "low bound %qE above array section size "
			    "in %qs clause", low_bound,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (tree_int_cst_equal (size, low_bound))
		{
		  /* Low bound == array size means an implicit
		     zero-length section.  */
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"zero length array section in %qs clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		  maybe_zero_len = true;
		}
	      else if (length == NULL_TREE
		       && first_non_one == types.length ()
		       && tree_int_cst_equal
			    (TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
			     low_bound))
		first_non_one++;
	    }
	  else if (length == NULL_TREE)
	    {
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION
		  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION)
		maybe_zero_len = true;
	      if (first_non_one == types.length ())
		first_non_one++;
	    }
	  if (length && TREE_CODE (length) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "length %qE above array section size "
			    "in %qs clause", length,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (TREE_CODE (low_bound) == INTEGER_CST)
		{
		  tree lbpluslen
		    = size_binop (PLUS_EXPR,
				  fold_convert (sizetype, low_bound),
				  fold_convert (sizetype, length));
		  if (TREE_CODE (lbpluslen) == INTEGER_CST
		      && tree_int_cst_lt (size, lbpluslen))
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"high bound %qE above array section size "
				"in %qs clause", lbpluslen,
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		}
	    }
	}
      else if (length == NULL_TREE)
	{
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION
	      && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION)
	    maybe_zero_len = true;
	  if (first_non_one == types.length ())
	    first_non_one++;
	}

      /* For [lb:] we will need to evaluate lb more than once.  */
      if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	{
	  tree lb = cp_save_expr (low_bound);
	  if (lb != low_bound)
	    {
	      TREE_PURPOSE (t) = lb;
	      low_bound = lb;
	    }
	}
    }
  else if (TYPE_PTR_P (type))
    {
      if (length == NULL_TREE)
	{
	  if (TREE_CODE (ret) == PARM_DECL && DECL_ARRAY_PARAMETER_P (ret))
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "for array function parameter length expression "
		      "must be specified");
	  else
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "for pointer type length expression must be specified");
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      /* If there is a pointer type anywhere but in the very first
	 array-section-subscript, the array section can't be contiguous.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	  && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "array section is not contiguous in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
    }
  else
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"%qE does not have pointer or array type", ret);
      return error_mark_node;
    }
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
    types.safe_push (TREE_TYPE (ret));
  /* We will need to evaluate lb more than once.  */
  tree lb = cp_save_expr (low_bound);
  if (lb != low_bound)
    {
      TREE_PURPOSE (t) = lb;
      low_bound = lb;
    }
  /* Temporarily disable -fstrong-eval-order for array reductions.
     The SAVE_EXPR and COMPOUND_EXPR added if low_bound has side-effects
     is something the middle-end can't cope with and more importantly,
     it needs to be the actual base variable that is privatized, not some
     temporary assigned previous value of it.  That, together with OpenMP
     saying how many times the side-effects are evaluated is unspecified,
     makes int *a, *b; ... reduction(+:a[a = b, 3:10]) really unspecified.  */
  warning_sentinel s (flag_strong_eval_order,
		      OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION);
  /* Build the ret[low_bound] array reference for this level.  */
  ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, false);
  return ret;
}

/* Handle array sections for clause C.
 */

static bool
handle_omp_array_sections (tree c, enum c_omp_region_type ort)
{
  bool maybe_zero_len = false;
  unsigned int first_non_one = 0;
  auto_vec<tree, 10> types;
  tree *tp = &OMP_CLAUSE_DECL (c);
  /* For depend clauses with iterators the decl is wrapped in an extra
     TREE_LIST whose TREE_PURPOSE is a TREE_VEC; look through it.  */
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
      && TREE_CODE (*tp) == TREE_LIST
      && TREE_PURPOSE (*tp)
      && TREE_CODE (TREE_PURPOSE (*tp)) == TREE_VEC)
    tp = &TREE_VALUE (*tp);
  tree first = handle_omp_array_sections_1 (c, *tp, types,
					    maybe_zero_len, first_non_one,
					    ort);
  if (first == error_mark_node)
    return true;
  if (first == NULL_TREE)
    return false;
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
    {
      tree t = *tp;
      tree tem = NULL_TREE;
      if (processing_template_decl)
	return false;
      /* Need to evaluate side effects in the length expressions
	 if any.  */
      while (TREE_CODE (t) == TREE_LIST)
	{
	  if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t)))
	    {
	      if (tem == NULL_TREE)
		tem = TREE_VALUE (t);
	      else
		tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem),
			      TREE_VALUE (t), tem);
	    }
	  t = TREE_CHAIN (t);
	}
      if (tem)
	first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first);
      *tp = first;
    }
  else
    {
      unsigned int num = types.length (), i;
      tree t, side_effects = NULL_TREE, size = NULL_TREE;
      tree condition = NULL_TREE;

      if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
	maybe_zero_len = true;
      if (processing_template_decl && maybe_zero_len)
	return false;

      /* Walk the subscripts outermost-first (TYPES was filled in the
	 same order; I counts down).  Validate contiguity and accumulate
	 the total mapped SIZE in bytes.  */
      for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
	   t = TREE_CHAIN (t))
	{
	  tree low_bound = TREE_PURPOSE (t);
	  tree length = TREE_VALUE (t);

	  i--;
	  if (low_bound
	      && TREE_CODE (low_bound) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (low_bound))
		 > TYPE_PRECISION (sizetype))
	    low_bound = fold_convert (sizetype, low_bound);
	  if (length
	      && TREE_CODE (length) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (length))
		 > TYPE_PRECISION (sizetype))
	    length = fold_convert (sizetype, length);
	  if (low_bound == NULL_TREE)
	    low_bound = integer_zero_node;
	  if (!maybe_zero_len && i > first_non_one)
	    {
	      /* Inner subscripts past FIRST_NON_ONE must span the whole
		 dimension, otherwise the section isn't contiguous.  */
	      if (integer_nonzerop (low_bound))
		goto do_warn_noncontiguous;
	      if (length != NULL_TREE
		  && TREE_CODE (length) == INTEGER_CST
		  && TYPE_DOMAIN (types[i])
		  && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
		  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
		     == INTEGER_CST)
		{
		  tree size;
		  size = size_binop (PLUS_EXPR,
				     TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				     size_one_node);
		  if (!tree_int_cst_equal (length, size))
		    {
		    do_warn_noncontiguous:
		      error_at (OMP_CLAUSE_LOCATION (c),
				"array section is not contiguous in %qs "
				"clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return true;
		    }
		}
	      if (!processing_template_decl
		  && length != NULL_TREE
		  && TREE_SIDE_EFFECTS (length))
		{
		  if (side_effects == NULL_TREE)
		    side_effects = length;
		  else
		    side_effects = build2 (COMPOUND_EXPR,
					   TREE_TYPE (side_effects),
					   length, side_effects);
		}
	    }
	  else if (processing_template_decl)
	    continue;
	  else
	    {
	      tree l;

	      if (i > first_non_one
		  && ((length && integer_nonzerop (length))
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION))
		continue;
	      if (length)
		l = fold_convert (sizetype, length);
	      else
		{
		  /* Implicit length: whole dimension minus the low
		     bound.  */
		  l = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				  size_one_node);
		  l = size_binop (MINUS_EXPR, l,
				  fold_convert (sizetype, low_bound));
		}
	      if (i > first_non_one)
		{
		  /* Collect a runtime condition that all inner lengths
		     are non-zero; if any is zero the whole size is 0.  */
		  l = fold_build2 (NE_EXPR, boolean_type_node, l,
				   size_zero_node);
		  if (condition == NULL_TREE)
		    condition = l;
		  else
		    condition = fold_build2 (BIT_AND_EXPR, boolean_type_node,
					     l, condition);
		}
	      else if (size == NULL_TREE)
		{
		  size = size_in_bytes (TREE_TYPE (types[i]));
		  tree eltype = TREE_TYPE (types[num - 1]);
		  while (TREE_CODE (eltype) == ARRAY_TYPE)
		    eltype = TREE_TYPE (eltype);
		  /* For reductions SIZE counts elements, not bytes.  */
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
		      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
		    size = size_binop (EXACT_DIV_EXPR, size,
				       size_in_bytes (eltype));
		  size = size_binop (MULT_EXPR, size, l);
		  if (condition)
		    size = fold_build3 (COND_EXPR, sizetype, condition,
					size, size_zero_node);
		}
	      else
		size = size_binop (MULT_EXPR, size, l);
	    }
	}
      if (!processing_template_decl)
	{
	  if (side_effects)
	    size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
	      || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
	    {
	      /* Rewrite the reduction decl as a MEM_REF of an array type
		 covering exactly the section, offset from the base.  */
	      size = size_binop (MINUS_EXPR, size, size_one_node);
	      size = save_expr (size);
	      tree index_type = build_index_type (size);
	      tree eltype = TREE_TYPE (first);
	      while (TREE_CODE (eltype) == ARRAY_TYPE)
		eltype = TREE_TYPE (eltype);
	      tree type = build_array_type (eltype, index_type);
	      tree ptype = build_pointer_type (eltype);
	      if (TYPE_REF_P (TREE_TYPE (t))
		  && INDIRECT_TYPE_P (TREE_TYPE (TREE_TYPE (t))))
		t = convert_from_reference (t);
	      else if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
		t = build_fold_addr_expr (t);
	      tree t2 = build_fold_addr_expr (first);
	      t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				     ptrdiff_type_node, t2);
	      t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
				    ptrdiff_type_node, t2,
				    fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						      ptrdiff_type_node, t));
	      if (tree_fits_shwi_p (t2))
		t = build2 (MEM_REF, type, t,
			    build_int_cst (ptype, tree_to_shwi (t2)));
	      else
		{
		  t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
					 sizetype, t2);
		  t = build2_loc (OMP_CLAUSE_LOCATION (c), POINTER_PLUS_EXPR,
				  TREE_TYPE (t), t, t2);
		  t = build2 (MEM_REF, type, t, build_int_cst (ptype, 0));
		}
	      OMP_CLAUSE_DECL (c) = t;
	      return false;
	    }
	  OMP_CLAUSE_DECL (c) = first;
	  OMP_CLAUSE_SIZE (c) = size;
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
	      || (TREE_CODE (t) == COMPONENT_REF
		  && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE))
	    return false;
	  if (ort == C_ORT_OMP || ort == C_ORT_ACC)
	    switch (OMP_CLAUSE_MAP_KIND (c))
	      {
	      case GOMP_MAP_ALLOC:
	      case GOMP_MAP_IF_PRESENT:
	      case GOMP_MAP_TO:
	      case GOMP_MAP_FROM:
	      case GOMP_MAP_TOFROM:
	      case GOMP_MAP_ALWAYS_TO:
	      case GOMP_MAP_ALWAYS_FROM:
	      case GOMP_MAP_ALWAYS_TOFROM:
	      case GOMP_MAP_RELEASE:
	      case GOMP_MAP_DELETE:
	      case GOMP_MAP_FORCE_TO:
	      case GOMP_MAP_FORCE_FROM:
	      case GOMP_MAP_FORCE_TOFROM:
	      case GOMP_MAP_FORCE_PRESENT:
		OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1;
		break;
	      default:
		break;
	      }
	  /* Append a second map clause C2 describing the base pointer /
	     reference itself, so the runtime can bias it.  */
	  tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
				      OMP_CLAUSE_MAP);
	  if ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP && ort != C_ORT_ACC)
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
	  else if (TREE_CODE (t) == COMPONENT_REF)
	    {
	      gomp_map_kind k = (ort == C_ORT_ACC) ? GOMP_MAP_ATTACH_DETACH
						   : GOMP_MAP_ALWAYS_POINTER;
	      OMP_CLAUSE_SET_MAP_KIND (c2, k);
	    }
	  else if (REFERENCE_REF_P (t)
		   && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF)
	    {
	      t = TREE_OPERAND (t, 0);
	      gomp_map_kind k = (ort == C_ORT_ACC) ? GOMP_MAP_ATTACH_DETACH
						   : GOMP_MAP_ALWAYS_POINTER;
	      OMP_CLAUSE_SET_MAP_KIND (c2, k);
	    }
	  else
	    OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_POINTER);
	  if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
	      && !cxx_mark_addressable (t))
	    return false;
	  OMP_CLAUSE_DECL (c2) = t;
	  /* SIZE of C2 is the byte offset of the section start from the
	     base pointer.  */
	  t = build_fold_addr_expr (first);
	  t = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				ptrdiff_type_node, t);
	  tree ptr = OMP_CLAUSE_DECL (c2);
	  ptr = convert_from_reference (ptr);
	  if (!INDIRECT_TYPE_P (TREE_TYPE (ptr)))
	    ptr = build_fold_addr_expr (ptr);
	  t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
			       ptrdiff_type_node, t,
			       fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						 ptrdiff_type_node, ptr));
	  OMP_CLAUSE_SIZE (c2) = t;
	  OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
	  OMP_CLAUSE_CHAIN (c) = c2;
	  ptr = OMP_CLAUSE_DECL (c2);
	  if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER
	      && TYPE_REF_P (TREE_TYPE (ptr))
	      && INDIRECT_TYPE_P (TREE_TYPE (TREE_TYPE (ptr))))
	    {
	      /* Reference to pointer: a third clause C3 maps the
		 reference itself.  */
	      tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					  OMP_CLAUSE_MAP);
	      OMP_CLAUSE_SET_MAP_KIND (c3, OMP_CLAUSE_MAP_KIND (c2));
	      OMP_CLAUSE_DECL (c3) = ptr;
	      if (OMP_CLAUSE_MAP_KIND (c2) == GOMP_MAP_ALWAYS_POINTER)
		OMP_CLAUSE_DECL (c2) = build_simple_mem_ref (ptr);
	      else
		OMP_CLAUSE_DECL (c2) = convert_from_reference (ptr);
	      OMP_CLAUSE_SIZE (c3) = size_zero_node;
	      OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2);
	      OMP_CLAUSE_CHAIN (c2) = c3;
	    }
	}
    }
  return false;
}

/* Return identifier to look up for omp declare reduction.  The name is
   "omp declare reduction <op>" optionally followed by '~' and the
   mangled TYPE.  */

tree
omp_reduction_id (enum tree_code reduction_code, tree reduction_id, tree type)
{
  const char *p = NULL;
  const char *m = NULL;
  switch (reduction_code)
    {
    case PLUS_EXPR:
    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_AND_EXPR:
    case BIT_XOR_EXPR:
    case BIT_IOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Operators spellable as 'operator X' use the overloaded-operator
	 identifier.  */
      reduction_id = ovl_op_identifier (false, reduction_code);
      break;
    case MIN_EXPR:
      p = "min";
      break;
    case MAX_EXPR:
      p = "max";
      break;
    default:
      break;
    }

  if (p == NULL)
    {
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      p = IDENTIFIER_POINTER (reduction_id);
    }

  if (type != NULL_TREE)
    m = mangle_type_string (TYPE_MAIN_VARIANT (type));

  const char prefix[] = "omp declare reduction ";
  size_t lenp = sizeof (prefix);
  /* Don't double the prefix if the identifier already carries it.  */
  if (strncmp (p, prefix, lenp - 1) == 0)
    lenp = 1;
  size_t len = strlen (p);
  size_t lenm = m ? strlen (m) + 1 : 0;
  char *name = XALLOCAVEC (char, lenp + len + lenm);
  if (lenp > 1)
    memcpy (name, prefix, lenp - 1);
  memcpy (name + lenp - 1, p, len + 1);
  if (m)
    {
      name[lenp + len - 1] = '~';
      memcpy (name + lenp + len, m, lenm);
    }
  return get_identifier (name);
}

/* Lookup OpenMP UDR ID for TYPE, return the corresponding artificial
   FUNCTION_DECL or NULL_TREE if not found.
 */

static tree
omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp,
		      vec<tree> *ambiguousp)
{
  tree orig_id = id;
  tree baselink = NULL_TREE;
  if (identifier_p (id))
    {
      cp_id_kind idk;
      bool nonint_cst_expression_p;
      const char *error_msg;
      /* Turn the plain identifier into the mangled UDR lookup name and
	 do ordinary name lookup on it.  */
      id = omp_reduction_id (ERROR_MARK, id, type);
      tree decl = lookup_name (id);
      if (decl == NULL_TREE)
	decl = error_mark_node;
      id = finish_id_expression (id, decl, NULL_TREE, &idk, false, true,
				 &nonint_cst_expression_p, false, true, false,
				 false, &error_msg, loc);
      if (idk == CP_ID_KIND_UNQUALIFIED && identifier_p (id))
	{
	  /* Unqualified lookup failed; try argument-dependent lookup
	     with TYPE& as the argument type.  */
	  vec<tree, va_gc> *args = NULL;
	  vec_safe_push (args, build_reference_type (type));
	  id = perform_koenig_lookup (id, args, tf_none);
	}
    }
  else if (TREE_CODE (id) == SCOPE_REF)
    id = lookup_qualified_name (TREE_OPERAND (id, 0),
				omp_reduction_id (ERROR_MARK,
						  TREE_OPERAND (id, 1),
						  type),
				LOOK_want::NORMAL, false);
  tree fns = id;
  id = NULL_TREE;
  if (fns && is_overloaded_fn (fns))
    {
      /* Select the overload whose (sole) parameter is a reference
	 to exactly TYPE.  */
      for (lkp_iterator iter (get_fns (fns)); iter; ++iter)
	{
	  tree fndecl = *iter;
	  if (TREE_CODE (fndecl) == FUNCTION_DECL)
	    {
	      tree argtype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	      if (same_type_p (TREE_TYPE (argtype), type))
		{
		  id = fndecl;
		  break;
		}
	    }
	}
      if (id && BASELINK_P (fns))
	{
	  if (baselinkp)
	    *baselinkp = fns;
	  else
	    baselink = fns;
	}
    }
  if (!id && CLASS_TYPE_P (type) && TYPE_BINFO (type))
    {
      /* Not found for TYPE itself: recurse into base classes, collecting
	 multiple hits in AMBIGUOUS to diagnose at the outermost level.  */
      vec<tree> ambiguous = vNULL;
      tree binfo = TYPE_BINFO (type), base_binfo, ret = NULL_TREE;
      unsigned int ix;
      if (ambiguousp == NULL)
	ambiguousp = &ambiguous;
      for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ix++)
	{
	  id = omp_reduction_lookup (loc, orig_id, BINFO_TYPE (base_binfo),
				     baselinkp ? baselinkp : &baselink,
				     ambiguousp);
	  if (id == NULL_TREE)
	    continue;
	  if (!ambiguousp->is_empty ())
	    ambiguousp->safe_push (id);
	  else if (ret != NULL_TREE)
	    {
	      ambiguousp->safe_push (ret);
	      ambiguousp->safe_push (id);
	      ret = NULL_TREE;
	    }
	  else
	    ret = id;
	}
      /* Only the outermost call (owning AMBIGUOUS) reports.  */
      if (ambiguousp != &ambiguous)
	return ret;
      if (!ambiguous.is_empty ())
	{
	  const char *str = _("candidates are:");
	  unsigned int idx;
	  tree udr;
	  error_at (loc, "user defined reduction lookup is ambiguous");
	  FOR_EACH_VEC_ELT (ambiguous, idx, udr)
	    {
	      inform (DECL_SOURCE_LOCATION (udr), "%s %#qD", str, udr);
	      if (idx == 0)
		str = get_spaces (str);
	    }
	  ambiguous.release ();
	  ret = error_mark_node;
	  baselink = NULL_TREE;
	}
      id = ret;
    }
  if (id && baselink)
    perform_or_defer_access_check (BASELINK_BINFO (baselink),
				   id, id, tf_warning_or_error);
  return id;
}

/* Helper function for cp_parser_omp_declare_reduction_exprs and
   tsubst_omp_udr.  Remove CLEANUP_STMT for data (omp_priv variable).
   Also append INIT_EXPR for DECL_INITIAL of omp_priv after its
   DECL_EXPR.  */

tree
cp_remove_omp_priv_cleanup_stmt (tree *tp, int *walk_subtrees, void *data)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == (tree) data)
    /* Drop the cleanup wrapper, keeping only its body.  */
    *tp = CLEANUP_BODY (*tp);
  else if (TREE_CODE (*tp) == DECL_EXPR)
    {
      tree decl = DECL_EXPR_DECL (*tp);
      if (!processing_template_decl
	  && decl == (tree) data
	  && DECL_INITIAL (decl)
	  && DECL_INITIAL (decl) != error_mark_node)
	{
	  /* Split declaration and initialization: the DECL_EXPR is
	     followed by an explicit INIT_EXPR in a statement list.  */
	  tree list = NULL_TREE;
	  append_to_statement_list_force (*tp, &list);
	  tree init_expr = build2 (INIT_EXPR, void_type_node,
				   decl, DECL_INITIAL (decl));
	  DECL_INITIAL (decl) = NULL_TREE;
	  append_to_statement_list_force (init_expr, &list);
	  *tp = list;
	}
    }
  return NULL_TREE;
}

/* Data passed from cp_check_omp_declare_reduction to
   cp_check_omp_declare_reduction_r.  */

struct cp_check_omp_declare_reduction_data
{
  location_t loc;
  /* Up to 7 statements of the UDR body: combiner decls+stmt, then
     initializer decls+stmt (see cp_check_omp_declare_reduction).  */
  tree stmts[7];
  /* True while checking the combiner, false for the initializer.  */
  bool combiner_p;
};

/* Helper function for cp_check_omp_declare_reduction, called via
   cp_walk_tree.
 */

static tree
cp_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  struct cp_check_omp_declare_reduction_data *udr_data
    = (struct cp_check_omp_declare_reduction_data *) data;
  /* A non-artificial variable in a UDR expression must be one of the
     two special variables for the current context (omp_out/omp_in for
     the combiner, omp_priv/omp_orig for the initializer).  */
  if (SSA_VAR_P (*tp)
      && !DECL_ARTIFICIAL (*tp)
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 0 : 3])
      && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 1 : 4]))
    {
      location_t loc = udr_data->loc;
      if (udr_data->combiner_p)
	error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
		       "variable %qD which is not %<omp_out%> nor %<omp_in%>",
		  *tp);
      else
	error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
		       "to variable %qD which is not %<omp_priv%> nor "
		       "%<omp_orig%>", *tp);
      /* Returning non-NULL stops the walk and reports failure.  */
      return *tp;
    }
  return NULL_TREE;
}

/* Diagnose violation of OpenMP #pragma omp declare reduction restrictions.  */

bool
cp_check_omp_declare_reduction (tree udr)
{
  tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (udr)));
  gcc_assert (TYPE_REF_P (type));
  type = TREE_TYPE (type);
  int i;
  location_t loc = DECL_SOURCE_LOCATION (udr);

  if (type == error_mark_node)
    return false;
  if (ARITHMETIC_TYPE_P (type))
    {
      /* Reject redeclaring the reductions predefined for arithmetic
	 types.  The UDR name encodes the operator; compare against each
	 predefined operator's encoded name.  */
      static enum tree_code predef_codes[]
	= { PLUS_EXPR, MULT_EXPR, MINUS_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR,
	    BIT_IOR_EXPR, TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR };
      for (i = 0; i < 8; i++)
	{
	  tree id = omp_reduction_id (predef_codes[i], NULL_TREE, NULL_TREE);
	  const char *n1 = IDENTIFIER_POINTER (DECL_NAME (udr));
	  const char *n2 = IDENTIFIER_POINTER (id);
	  if (strncmp (n1, n2, IDENTIFIER_LENGTH (id)) == 0
	      && (n1[IDENTIFIER_LENGTH (id)] == '~'
		  || n1[IDENTIFIER_LENGTH (id)] == '\0'))
	    break;
	}

      if (i == 8
	  && TREE_CODE (type) != COMPLEX_EXPR)
	{
	  /* Also check the "min"/"max" predefined names for
	     non-complex arithmetic types.  */
	  const char prefix_minmax[] = "omp declare reduction m";
	  size_t prefix_size = sizeof (prefix_minmax) - 1;
	  const char *n = IDENTIFIER_POINTER (DECL_NAME (udr));
	  if (strncmp (IDENTIFIER_POINTER (DECL_NAME (udr)),
		       prefix_minmax, prefix_size) == 0
	      && ((n[prefix_size] == 'i' && n[prefix_size + 1] == 'n')
		  || (n[prefix_size] == 'a' && n[prefix_size + 1] == 'x'))
	      && (n[prefix_size + 2] == '~' || n[prefix_size + 2] == '\0'))
	    i = 0;
	}
      if (i < 8)
	{
	  error_at (loc, "predeclared arithmetic type %qT in "
			 "%<#pragma omp declare reduction%>", type);
	  return false;
	}
    }
  else if (FUNC_OR_METHOD_TYPE_P (type)
	   || TREE_CODE (type) == ARRAY_TYPE)
    {
      error_at (loc, "function or array type %qT in "
		     "%<#pragma omp declare reduction%>", type);
      return false;
    }
  else if (TYPE_REF_P (type))
    {
      error_at (loc, "reference type %qT in %<#pragma omp declare reduction%>",
		type);
      return false;
    }
  else if (TYPE_QUALS_NO_ADDR_SPACE (type))
    {
      error_at (loc, "%<const%>, %<volatile%> or %<__restrict%>-qualified "
		     "type %qT in %<#pragma omp declare reduction%>", type);
      return false;
    }

  tree body = DECL_SAVED_TREE (udr);
  if (body == NULL_TREE || TREE_CODE (body) != STATEMENT_LIST)
    return true;

  /* Collect the UDR body's statements: [0..1] omp_out/omp_in decls,
     [2] combiner, [3..4] omp_priv/omp_orig decls, [5] initializer,
     [6] optional trailing DECL_EXPR.  */
  tree_stmt_iterator tsi;
  struct cp_check_omp_declare_reduction_data data;
  memset (data.stmts, 0, sizeof data.stmts);
  for (i = 0, tsi = tsi_start (body); i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    data.stmts[i] = tsi_stmt (tsi);
  data.loc = loc;
  gcc_assert (tsi_end_p (tsi));
  if (i >= 3)
    {
      gcc_assert (TREE_CODE (data.stmts[0]) == DECL_EXPR
		  && TREE_CODE (data.stmts[1]) == DECL_EXPR);
      /* TREE_NO_WARNING on omp_out's decl marks this UDR as already
	 diagnosed; don't repeat the errors.  */
      if (TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])))
	return true;
      data.combiner_p = true;
      if (cp_walk_tree (&data.stmts[2], cp_check_omp_declare_reduction_r,
			&data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
    }
  if (i >= 6)
    {
      gcc_assert (TREE_CODE (data.stmts[3]) == DECL_EXPR
		  && TREE_CODE (data.stmts[4]) == DECL_EXPR);
      data.combiner_p = false;
      if (cp_walk_tree (&data.stmts[5], cp_check_omp_declare_reduction_r,
			&data, NULL)
	  || cp_walk_tree (&DECL_INITIAL (DECL_EXPR_DECL (data.stmts[3])),
			   cp_check_omp_declare_reduction_r, &data, NULL))
	TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1;
      if (i == 7)
	gcc_assert (TREE_CODE (data.stmts[6]) == DECL_EXPR);
    }
  return true;
}

/* Helper function of finish_omp_clauses.
Clone STMT as if we were making an inline call. But, remap the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL. */ static tree clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2, tree decl, tree placeholder) { copy_body_data id; hash_map<tree, tree> decl_map; decl_map.put (omp_decl1, placeholder); decl_map.put (omp_decl2, decl); memset (&id, 0, sizeof (id)); id.src_fn = DECL_CONTEXT (omp_decl1); id.dst_fn = current_function_decl; id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn); id.decl_map = &decl_map; id.copy_decl = copy_decl_no_change; id.transform_call_graph_edges = CB_CGE_DUPLICATE; id.transform_new_cfg = true; id.transform_return_to_modify = false; id.transform_lang_insert_block = NULL; id.eh_lp_nr = 0; walk_tree (&stmt, copy_tree_body_r, &id, NULL); return stmt; } /* Helper function of finish_omp_clauses, called via cp_walk_tree. Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP. */ static tree find_omp_placeholder_r (tree *tp, int *, void *data) { if (*tp == (tree) data) return *tp; return NULL_TREE; } /* Helper function of finish_omp_clauses. Handle OMP_CLAUSE_REDUCTION C. Return true if there is some error and the clause should be removed. 
*/

static bool
finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
{
  tree t = OMP_CLAUSE_DECL (c);
  bool predefined = false;

  /* In templates the decl may still be a TREE_LIST (array section);
     defer all checking until instantiation.  */
  if (TREE_CODE (t) == TREE_LIST)
    {
      gcc_assert (processing_template_decl);
      return false;
    }
  tree type = TREE_TYPE (t);
  if (TREE_CODE (t) == MEM_REF)
    type = TREE_TYPE (type);
  if (TYPE_REF_P (type))
    type = TREE_TYPE (type);
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      /* Array reductions: rewrite the decl as a MEM_REF over a
	 flattened array of the element type.  */
      tree oatype = type;
      gcc_assert (TREE_CODE (t) != MEM_REF);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);
      if (!processing_template_decl)
	{
	  t = require_complete_type (t);
	  if (t == error_mark_node)
	    return true;
	  /* Total number of elements = whole size / element size.  */
	  tree size = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (oatype),
				  TYPE_SIZE_UNIT (type));
	  if (integer_zerop (size))
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qE in %<reduction%> clause is a zero size array",
			omp_clause_printable_decl (t));
	      return true;
	    }
	  size = size_binop (MINUS_EXPR, size, size_one_node);
	  size = save_expr (size);
	  tree index_type = build_index_type (size);
	  tree atype = build_array_type (type, index_type);
	  tree ptype = build_pointer_type (type);
	  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
	    t = build_fold_addr_expr (t);
	  t = build2 (MEM_REF, atype, t, build_int_cst (ptype, 0));
	  OMP_CLAUSE_DECL (c) = t;
	}
    }
  if (type == error_mark_node)
    return true;
  else if (ARITHMETIC_TYPE_P (type))
    /* Decide whether the reduction operator is one of the predefined
       ones for this arithmetic type (no UDR lookup needed then).  */
    switch (OMP_CLAUSE_REDUCTION_CODE (c))
      {
      case PLUS_EXPR:
      case MULT_EXPR:
      case MINUS_EXPR:
	predefined = true;
	break;
      case MIN_EXPR:
      case MAX_EXPR:
	if (TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case BIT_AND_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
	if (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
	  break;
	predefined = true;
	break;
      case TRUTH_ANDIF_EXPR:
      case TRUTH_ORIF_EXPR:
	if (FLOAT_TYPE_P (type))
	  break;
	predefined = true;
	break;
      default:
	break;
      }
  else if (TYPE_READONLY (type))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"%qE has const type for %<reduction%>",
		omp_clause_printable_decl (t));
      return true;
    }
  else if (!processing_template_decl)
    {
      t = require_complete_type (t);
      if (t == error_mark_node)
	return true;
      OMP_CLAUSE_DECL (c) = t;
    }

  if (predefined)
    {
      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
      return false;
    }
  else if (processing_template_decl)
    {
      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == error_mark_node)
	return true;
      return false;
    }

  tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

  type = TYPE_MAIN_VARIANT (type);
  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE;
  if (id == NULL_TREE)
    id = omp_reduction_id (OMP_CLAUSE_REDUCTION_CODE (c),
			   NULL_TREE, NULL_TREE);
  /* Look up the user-defined reduction for this operator and type.  */
  id = omp_reduction_lookup (OMP_CLAUSE_LOCATION (c), id, type, NULL, NULL);
  if (id)
    {
      if (id == error_mark_node)
	return true;
      mark_used (id);
      tree body = DECL_SAVED_TREE (id);
      if (!body)
	return true;
      if (TREE_CODE (body) == STATEMENT_LIST)
	{
	  /* The UDR body is a statement list of up to 7 statements:
	     stmts[0]/stmts[1] declare omp_out/omp_in and stmts[2] is
	     the combiner; stmts[3]/stmts[4] declare omp_priv/omp_orig
	     and stmts[5] (plus optionally stmts[6]) form the
	     initializer.  */
	  tree_stmt_iterator tsi;
	  tree placeholder = NULL_TREE, decl_placeholder = NULL_TREE;
	  int i;
	  tree stmts[7];
	  tree atype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (id)));
	  atype = TREE_TYPE (atype);
	  /* A cast is needed when the UDR was declared for a base
	     class of TYPE.  */
	  bool need_static_cast = !same_type_p (type, atype);
	  memset (stmts, 0, sizeof stmts);
	  for (i = 0, tsi = tsi_start (body);
	       i < 7 && !tsi_end_p (tsi);
	       i++, tsi_next (&tsi))
	    stmts[i] = tsi_stmt (tsi);
	  gcc_assert (tsi_end_p (tsi));

	  if (i >= 3)
	    {
	      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
			  && TREE_CODE (stmts[1]) == DECL_EXPR);
	      placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type);
	      DECL_ARTIFICIAL (placeholder) = 1;
	      DECL_IGNORED_P (placeholder) = 1;
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder;
	      if (TREE_CODE (t) == MEM_REF)
		{
		  decl_placeholder = build_lang_decl (VAR_DECL, NULL_TREE,
						      type);
		  DECL_ARTIFICIAL (decl_placeholder) = 1;
		  DECL_IGNORED_P (decl_placeholder) = 1;
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = decl_placeholder;
		}
	      /* Propagate addressability from the UDR's own locals.  */
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[0])))
		cxx_mark_addressable (placeholder);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[1]))
		  && (decl_placeholder
		      || !TYPE_REF_P (TREE_TYPE (OMP_CLAUSE_DECL (c)))))
		cxx_mark_addressable (decl_placeholder ? decl_placeholder
				      : OMP_CLAUSE_DECL (c));
	      tree omp_out = placeholder;
	      tree omp_in = decl_placeholder ? decl_placeholder
			    : convert_from_reference (OMP_CLAUSE_DECL (c));
	      if (need_static_cast)
		{
		  tree rtype = build_reference_type (atype);
		  omp_out = build_static_cast (input_location, rtype, omp_out,
					       tf_warning_or_error);
		  omp_in = build_static_cast (input_location, rtype, omp_in,
					      tf_warning_or_error);
		  if (omp_out == error_mark_node || omp_in == error_mark_node)
		    return true;
		  omp_out = convert_from_reference (omp_out);
		  omp_in = convert_from_reference (omp_in);
		}
	      /* Instantiate the combiner with our omp_in/omp_out.  */
	      OMP_CLAUSE_REDUCTION_MERGE (c)
		= clone_omp_udr (stmts[2], DECL_EXPR_DECL (stmts[0]),
				 DECL_EXPR_DECL (stmts[1]), omp_in, omp_out);
	    }
	  if (i >= 6)
	    {
	      gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR
			  && TREE_CODE (stmts[4]) == DECL_EXPR);
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[3]))
		  && (decl_placeholder
		      || !TYPE_REF_P (TREE_TYPE (OMP_CLAUSE_DECL (c)))))
		cxx_mark_addressable (decl_placeholder ? decl_placeholder
				      : OMP_CLAUSE_DECL (c));
	      if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[4])))
		cxx_mark_addressable (placeholder);
	      tree omp_priv = decl_placeholder ? decl_placeholder
			      : convert_from_reference (OMP_CLAUSE_DECL (c));
	      tree omp_orig = placeholder;
	      if (need_static_cast)
		{
		  /* i == 7 means a constructor-style initializer, which
		     cannot be retargeted to a base class.  */
		  if (i == 7)
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"user defined reduction with constructor "
				"initializer for base class %qT", atype);
		      return true;
		    }
		  tree rtype = build_reference_type (atype);
		  omp_priv = build_static_cast (input_location, rtype,
						omp_priv,
						tf_warning_or_error);
		  omp_orig = build_static_cast (input_location, rtype,
						omp_orig,
						tf_warning_or_error);
		  if (omp_priv == error_mark_node
		      || omp_orig == error_mark_node)
		    return true;
		  omp_priv = convert_from_reference (omp_priv);
		  omp_orig = convert_from_reference (omp_orig);
		}
	      if (i == 6)
		*need_default_ctor = true;
	      /* Instantiate the initializer with our omp_priv/omp_orig.  */
	      OMP_CLAUSE_REDUCTION_INIT (c)
		= clone_omp_udr (stmts[5], DECL_EXPR_DECL (stmts[4]),
				 DECL_EXPR_DECL (stmts[3]),
				 omp_priv, omp_orig);
	      /* Record whether the initializer references omp_orig.  */
	      if (cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c),
				find_omp_placeholder_r,
				placeholder, NULL))
		OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1;
	    }
	  else if (i >= 3)
	    {
	      /* No explicit initializer clause: default-construct class
	         types, zero-initialize everything else.  */
	      if (CLASS_TYPE_P (type) && !pod_type_p (type))
		*need_default_ctor = true;
	      else
		{
		  tree init;
		  tree v = decl_placeholder ? decl_placeholder
			   : convert_from_reference (t);
		  if (AGGREGATE_TYPE_P (TREE_TYPE (v)))
		    init = build_constructor (TREE_TYPE (v), NULL);
		  else
		    init = fold_convert (TREE_TYPE (v), integer_zero_node);
		  OMP_CLAUSE_REDUCTION_INIT (c)
		    = build2 (INIT_EXPR, TREE_TYPE (v), v, init);
		}
	    }
	}
    }
  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
    *need_dtor = true;
  else
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"user defined reduction not found for %qE",
		omp_clause_printable_decl (t));
      return true;
    }
  if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
    gcc_assert (TYPE_SIZE_UNIT (type)
		&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST);
  return false;
}

/* Called from finish_struct_1.  linear(this) or linear(this:step)
   clauses might not be finalized yet because the class has been
   incomplete when parsing #pragma omp declare simd methods.  Fix
   those up now.
*/

void
finish_omp_declare_simd_methods (tree t)
{
  if (processing_template_decl)
    return;

  for (tree x = TYPE_FIELDS (t); x; x = DECL_CHAIN (x))
    {
      if (TREE_CODE (x) == USING_DECL
	  || !DECL_NONSTATIC_MEMBER_FUNCTION_P (x))
	continue;
      tree ods = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (x));
      if (!ods || !TREE_VALUE (ods))
	continue;
      for (tree c = TREE_VALUE (TREE_VALUE (ods)); c; c = OMP_CLAUSE_CHAIN (c))
	/* linear(this...) was recorded with a zero decl and a step in
	   pointer type; now that T is complete, scale the step by the
	   object size so it is expressed in bytes.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	    && integer_zerop (OMP_CLAUSE_DECL (c))
	    && OMP_CLAUSE_LINEAR_STEP (c)
	    && TYPE_PTR_P (TREE_TYPE (OMP_CLAUSE_LINEAR_STEP (c))))
	  {
	    tree s = OMP_CLAUSE_LINEAR_STEP (c);
	    s = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, s);
	    s = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MULT_EXPR,
				 sizetype, s, TYPE_SIZE_UNIT (t));
	    OMP_CLAUSE_LINEAR_STEP (c) = s;
	  }
    }
}

/* Adjust sink depend clause to take into account pointer offsets.

   Return TRUE if there was a problem processing the offset, and the
   whole clause should be removed.  */

static bool
cp_finish_omp_clause_depend_sink (tree sink_clause)
{
  tree t = OMP_CLAUSE_DECL (sink_clause);
  gcc_assert (TREE_CODE (t) == TREE_LIST);

  /* Make sure we don't adjust things twice for templates.  */
  if (processing_template_decl)
    return false;

  for (; t; t = TREE_CHAIN (t))
    {
      tree decl = TREE_VALUE (t);
      if (TYPE_PTR_P (TREE_TYPE (decl)))
	{
	  tree offset = TREE_PURPOSE (t);
	  bool neg = wi::neg_p (wi::to_wide (offset));
	  offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset);
	  decl = mark_rvalue_use (decl);
	  decl = convert_from_reference (decl);
	  /* Convert the element offset to a byte offset:
	     (decl +/- offset) - decl, using pointer arithmetic so the
	     element size of DECL's pointee is taken into account.  */
	  tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (sink_clause),
				     neg ? MINUS_EXPR : PLUS_EXPR,
				     decl, offset);
	  t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (sink_clause),
				MINUS_EXPR, sizetype,
				fold_convert (sizetype, t2),
				fold_convert (sizetype, decl));
	  if (t2 == error_mark_node)
	    return true;
	  TREE_PURPOSE (t) = t2;
	}
    }
  return false;
}

/* Finish OpenMP iterators ITER.  Return true if they are erroneous
   and clauses containing them should be removed.
*/

static bool
cp_omp_finish_iterators (tree iter)
{
  bool ret = false;
  /* Each iterator is a TREE_VEC of (var, begin, end, step, ...).  */
  for (tree it = iter; it; it = TREE_CHAIN (it))
    {
      tree var = TREE_VEC_ELT (it, 0);
      tree begin = TREE_VEC_ELT (it, 1);
      tree end = TREE_VEC_ELT (it, 2);
      tree step = TREE_VEC_ELT (it, 3);
      tree orig_step;
      tree type = TREE_TYPE (var);
      location_t loc = DECL_SOURCE_LOCATION (var);
      if (type == error_mark_node)
	{
	  ret = true;
	  continue;
	}
      if (type_dependent_expression_p (var))
	continue;
      if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
	{
	  error_at (loc, "iterator %qD has neither integral nor pointer type",
		    var);
	  ret = true;
	  continue;
	}
      else if (TYPE_READONLY (type))
	{
	  error_at (loc, "iterator %qD has const qualified type", var);
	  ret = true;
	  continue;
	}
      if (type_dependent_expression_p (begin)
	  || type_dependent_expression_p (end)
	  || type_dependent_expression_p (step))
	continue;
      else if (error_operand_p (step))
	{
	  ret = true;
	  continue;
	}
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
	{
	  error_at (EXPR_LOC_OR_LOC (step, loc),
		    "iterator step with non-integral type");
	  ret = true;
	  continue;
	}
      begin = mark_rvalue_use (begin);
      end = mark_rvalue_use (end);
      step = mark_rvalue_use (step);
      begin = cp_build_c_cast (input_location, type, begin,
			       tf_warning_or_error);
      end = cp_build_c_cast (input_location, type, end, tf_warning_or_error);
      orig_step = step;
      if (!processing_template_decl)
	step = orig_step = save_expr (step);
      /* For pointer iterators the step is expressed in bytes.  */
      tree stype = POINTER_TYPE_P (type) ? sizetype : type;
      step = cp_build_c_cast (input_location, stype, step,
			      tf_warning_or_error);
      if (POINTER_TYPE_P (type) && !processing_template_decl)
	{
	  begin = save_expr (begin);
	  /* (begin + step) - begin scales STEP by the pointee size.  */
	  step = pointer_int_sum (loc, PLUS_EXPR, begin, step);
	  step = fold_build2_loc (loc, MINUS_EXPR, sizetype,
				  fold_convert (sizetype, step),
				  fold_convert (sizetype, begin));
	  step = fold_convert (ssizetype, step);
	}
      if (!processing_template_decl)
	{
	  begin = maybe_constant_value (begin);
	  end = maybe_constant_value (end);
	  step = maybe_constant_value (step);
	  orig_step = maybe_constant_value (orig_step);
	}
      if (integer_zerop (step))
	{
	  error_at (loc, "iterator %qD has zero step", var);
	  ret = true;
	  continue;
	}
      if (begin == error_mark_node
	  || end == error_mark_node
	  || step == error_mark_node
	  || orig_step == error_mark_node)
	{
	  ret = true;
	  continue;
	}
      if (!processing_template_decl)
	{
	  begin = fold_build_cleanup_point_expr (TREE_TYPE (begin), begin);
	  end = fold_build_cleanup_point_expr (TREE_TYPE (end), end);
	  step = fold_build_cleanup_point_expr (TREE_TYPE (step), step);
	  orig_step = fold_build_cleanup_point_expr (TREE_TYPE (orig_step),
						     orig_step);
	}
      /* An inner iterator's begin/end/step may not refer to an outer
	 iterator's variable.  */
      hash_set<tree> pset;
      tree it2;
      for (it2 = TREE_CHAIN (it); it2; it2 = TREE_CHAIN (it2))
	{
	  tree var2 = TREE_VEC_ELT (it2, 0);
	  tree begin2 = TREE_VEC_ELT (it2, 1);
	  tree end2 = TREE_VEC_ELT (it2, 2);
	  tree step2 = TREE_VEC_ELT (it2, 3);
	  location_t loc2 = DECL_SOURCE_LOCATION (var2);
	  if (cp_walk_tree (&begin2, find_omp_placeholder_r, var, &pset))
	    {
	      error_at (EXPR_LOC_OR_LOC (begin2, loc2),
			"begin expression refers to outer iterator %qD", var);
	      break;
	    }
	  else if (cp_walk_tree (&end2, find_omp_placeholder_r, var, &pset))
	    {
	      error_at (EXPR_LOC_OR_LOC (end2, loc2),
			"end expression refers to outer iterator %qD", var);
	      break;
	    }
	  else if (cp_walk_tree (&step2, find_omp_placeholder_r, var, &pset))
	    {
	      error_at (EXPR_LOC_OR_LOC (step2, loc2),
			"step expression refers to outer iterator %qD", var);
	      break;
	    }
	}
      if (it2)
	{
	  ret = true;
	  continue;
	}
      /* Store the processed expressions back into the TREE_VEC.  */
      TREE_VEC_ELT (it, 1) = begin;
      TREE_VEC_ELT (it, 2) = end;
      if (processing_template_decl)
	TREE_VEC_ELT (it, 3) = orig_step;
      else
	{
	  TREE_VEC_ELT (it, 3) = step;
	  TREE_VEC_ELT (it, 4) = orig_step;
	}
    }
  return ret;
}

/* Ensure that pointers are used in OpenACC attach and detach clauses.
   Return true if an error has been detected.  */

static bool
cp_oacc_check_attachments (tree c)
{
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
    return false;

  /* OpenACC attach / detach clauses must be pointers.  */
  if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
      || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH)
    {
      tree t = OMP_CLAUSE_DECL (c);
      tree type;

      while (TREE_CODE (t) == TREE_LIST)
	t = TREE_CHAIN (t);

      type = TREE_TYPE (t);

      if (TREE_CODE (type) == REFERENCE_TYPE)
	type = TREE_TYPE (type);

      if (TREE_CODE (type) != POINTER_TYPE)
	{
	  error_at (OMP_CLAUSE_LOCATION (c), "expected pointer in %qs clause",
		    c_omp_map_clause_name (c, true));
	  return true;
	}
    }

  return false;
}

/* For all elements of CLAUSES, validate them vs OpenMP constraints.
   Remove any elements from the list that are invalid.  */

tree
finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
{
  bitmap_head generic_head, firstprivate_head, lastprivate_head;
  bitmap_head aligned_head, map_head, map_field_head, oacc_reduction_head;
  tree c, t, *pc;
  tree safelen = NULL_TREE;
  bool branch_seen = false;
  bool copyprivate_seen = false;
  bool ordered_seen = false;
  bool order_seen = false;
  bool schedule_seen = false;
  bool oacc_async = false;
  tree last_iterators = NULL_TREE;
  bool last_iterators_remove = false;
  /* 1 if normal/task reduction has been seen, -1 if inscan reduction
     has been seen, -2 if mixed inscan/normal reduction diagnosed.
*/ int reduction_seen = 0; bitmap_obstack_initialize (NULL); bitmap_initialize (&generic_head, &bitmap_default_obstack); bitmap_initialize (&firstprivate_head, &bitmap_default_obstack); bitmap_initialize (&lastprivate_head, &bitmap_default_obstack); bitmap_initialize (&aligned_head, &bitmap_default_obstack); /* If ort == C_ORT_OMP_DECLARE_SIMD used as uniform_head instead. */ bitmap_initialize (&map_head, &bitmap_default_obstack); bitmap_initialize (&map_field_head, &bitmap_default_obstack); /* If ort == C_ORT_OMP used as nontemporal_head or use_device_xxx_head instead. */ bitmap_initialize (&oacc_reduction_head, &bitmap_default_obstack); if (ort & C_ORT_ACC) for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ASYNC) { oacc_async = true; break; } for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; bool field_ok = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_PRIVATE: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_REDUCTION: if (reduction_seen == 0) reduction_seen = OMP_CLAUSE_REDUCTION_INSCAN (c) ? -1 : 1; else if (reduction_seen != -2 && reduction_seen != (OMP_CLAUSE_REDUCTION_INSCAN (c) ? 
-1 : 1)) { error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> and non-%<inscan%> %<reduction%> clauses " "on the same construct"); reduction_seen = -2; } /* FALLTHRU */ case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) { remove = true; break; } if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_INSCAN (c)) { error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> %<reduction%> clause with array " "section"); remove = true; break; } if (TREE_CODE (t) == TREE_LIST) { while (TREE_CODE (t) == TREE_LIST) t = TREE_CHAIN (t); } else { gcc_assert (TREE_CODE (t) == MEM_REF); t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == POINTER_PLUS_EXPR) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == ADDR_EXPR || INDIRECT_REF_P (t)) t = TREE_OPERAND (t, 0); } tree n = omp_clause_decl_field (t); if (n) t = n; goto check_dup_generic_t; } if (oacc_async) cxx_mark_addressable (t); goto check_dup_generic; case OMP_CLAUSE_COPYPRIVATE: copyprivate_seen = true; field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_COPYIN: goto check_dup_generic; case OMP_CLAUSE_LINEAR: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_OMP_DECLARE_SIMD && OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_DEFAULT) { error_at (OMP_CLAUSE_LOCATION (c), "modifier should not be specified in %<linear%> " "clause on %<simd%> or %<for%> constructs"); OMP_CLAUSE_LINEAR_KIND (c) = OMP_CLAUSE_LINEAR_DEFAULT; } if ((VAR_P (t) || TREE_CODE (t) == PARM_DECL) && !type_dependent_expression_p (t)) { tree type = TREE_TYPE (t); if ((OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF || OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_UVAL) && !TYPE_REF_P (type)) { error_at (OMP_CLAUSE_LOCATION (c), "linear clause with %qs modifier applied to " "non-reference variable 
with %qT type", OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF ? "ref" : "uval", TREE_TYPE (t)); remove = true; break; } if (TYPE_REF_P (type)) type = TREE_TYPE (type); if (OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_REF) { if (!INTEGRAL_TYPE_P (type) && !TYPE_PTR_P (type)) { error_at (OMP_CLAUSE_LOCATION (c), "linear clause applied to non-integral " "non-pointer variable with %qT type", TREE_TYPE (t)); remove = true; break; } } } t = OMP_CLAUSE_LINEAR_STEP (c); if (t == NULL_TREE) t = integer_one_node; if (t == error_mark_node) { remove = true; break; } else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t)) && (ort != C_ORT_OMP_DECLARE_SIMD || TREE_CODE (t) != PARM_DECL || !TYPE_REF_P (TREE_TYPE (t)) || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (t))))) { error_at (OMP_CLAUSE_LOCATION (c), "linear step expression must be integral"); remove = true; break; } else { t = mark_rvalue_use (t); if (ort == C_ORT_OMP_DECLARE_SIMD && TREE_CODE (t) == PARM_DECL) { OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) = 1; goto check_dup_generic; } if (!processing_template_decl && (VAR_P (OMP_CLAUSE_DECL (c)) || TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL)) { if (ort == C_ORT_OMP_DECLARE_SIMD) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause step %qE is neither " "constant nor a parameter", t); remove = true; break; } } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); tree type = TREE_TYPE (OMP_CLAUSE_DECL (c)); if (TYPE_REF_P (type)) type = TREE_TYPE (type); if (OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF) { type = build_pointer_type (type); tree d = fold_convert (type, OMP_CLAUSE_DECL (c)); t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, d, t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, t), fold_convert (sizetype, d)); if (t == error_mark_node) { remove = true; break; } } else if (TYPE_PTR_P (type) /* Can't multiply the 
step yet if *this is still incomplete type. */ && (ort != C_ORT_OMP_DECLARE_SIMD || TREE_CODE (OMP_CLAUSE_DECL (c)) != PARM_DECL || !DECL_ARTIFICIAL (OMP_CLAUSE_DECL (c)) || DECL_NAME (OMP_CLAUSE_DECL (c)) != this_identifier || !TYPE_BEING_DEFINED (TREE_TYPE (type)))) { tree d = convert_from_reference (OMP_CLAUSE_DECL (c)); t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, d, t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, t), fold_convert (sizetype, d)); if (t == error_mark_node) { remove = true; break; } } else t = fold_convert (type, t); } OMP_CLAUSE_LINEAR_STEP (c) = t; } goto check_dup_generic; check_dup_generic: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) { if (!remove && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); } else t = OMP_CLAUSE_DECL (c); check_dup_generic_t: if (t == current_class_ptr && ((ort != C_ORT_OMP_DECLARE_SIMD && ort != C_ORT_ACC) || (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_UNIFORM))) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && (!field_ok || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if ((ort == C_ORT_ACC && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) || (ort == C_ORT_OMP && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR)))) { if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), ort == C_ORT_ACC ? 
"%qD appears more than once in reduction clauses" : "%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&oacc_reduction_head, DECL_UID (t)); } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); if (!field_ok) break; handle_field_decl: if (!remove && TREE_CODE (t) == FIELD_DECL && t == OMP_CLAUSE_DECL (c)) { OMP_CLAUSE_DECL (c) = omp_privatize_field (t, (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)); if (OMP_CLAUSE_DECL (c) == error_mark_node) remove = true; } break; case OMP_CLAUSE_FIRSTPRIVATE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); else t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %<firstprivate%>", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %<firstprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once 
in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&firstprivate_head, DECL_UID (t)); goto handle_field_decl; case OMP_CLAUSE_LASTPRIVATE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); else t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %<lastprivate%>", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %<lastprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&lastprivate_head, DECL_UID (t)); goto handle_field_decl; case OMP_CLAUSE_IF: t = OMP_CLAUSE_IF_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_IF_EXPR (c) = t; break; case OMP_CLAUSE_FINAL: t = OMP_CLAUSE_FINAL_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_FINAL_EXPR (c) = t; break; case OMP_CLAUSE_GANG: /* Operand 1 is the gang static: argument. 
*/ t = OMP_CLAUSE_OPERAND (c, 1); if (t != NULL_TREE) { if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<gang%> static expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1 && t != integer_minus_one_node) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<gang%> static value must be " "positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } } OMP_CLAUSE_OPERAND (c, 1) = t; } /* Check operand 0, the num argument. */ /* FALLTHRU */ case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: if (OMP_CLAUSE_OPERAND (c, 0) == NULL_TREE) break; /* FALLTHRU */ case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_GANG: error_at (OMP_CLAUSE_LOCATION (c), "%<gang%> num expression must be integral"); break; case OMP_CLAUSE_VECTOR: error_at (OMP_CLAUSE_LOCATION (c), "%<vector%> length expression must be integral"); break; case OMP_CLAUSE_WORKER: error_at (OMP_CLAUSE_LOCATION (c), "%<worker%> num expression must be integral"); break; default: error_at (OMP_CLAUSE_LOCATION (c), "%qs expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); } remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_GANG: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<gang%> num value must be positive"); break; case OMP_CLAUSE_VECTOR: 
warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<vector%> length value must be " "positive"); break; case OMP_CLAUSE_WORKER: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<worker%> num value must be " "positive"); break; default: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%qs value must be positive", omp_clause_code_name [OMP_CLAUSE_CODE (c)]); } t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_OPERAND (c, 0) = t; } break; case OMP_CLAUSE_SCHEDULE: t = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "schedule chunk size expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "chunk size value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; } if (!remove) schedule_seen = true; break; case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_SAFELEN: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qs length expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error_at (OMP_CLAUSE_LOCATION (c), "%qs length expression must be positive " "constant integer expression", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } OMP_CLAUSE_OPERAND (c, 0) = t; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SAFELEN) safelen = c; } break; case OMP_CLAUSE_ASYNC: t = OMP_CLAUSE_ASYNC_EXPR (c); 
if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<async%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_ASYNC_EXPR (c) = t; } break; case OMP_CLAUSE_WAIT: t = OMP_CLAUSE_WAIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_WAIT_EXPR (c) = t; break; case OMP_CLAUSE_THREAD_LIMIT: t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<thread_limit%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<thread_limit%> value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t; } break; case OMP_CLAUSE_DEVICE: t = OMP_CLAUSE_DEVICE_ID (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<device%> id must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DEVICE_ID (c) = t; } break; case OMP_CLAUSE_DIST_SCHEDULE: t = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<dist_schedule%> chunk size expression must be " "integral"); remove 
= true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_ALIGNED: t = OMP_CLAUSE_DECL (c); if (t == current_class_ptr && ort != C_ORT_OMP_DECLARE_SIMD) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %<aligned%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %<aligned%> clause", t); remove = true; } else if (!type_dependent_expression_p (t) && !TYPE_PTR_P (TREE_TYPE (t)) && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE && (!TYPE_REF_P (TREE_TYPE (t)) || (!INDIRECT_TYPE_P (TREE_TYPE (TREE_TYPE (t))) && (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) != ARRAY_TYPE)))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<aligned%> clause is neither a pointer nor " "an array nor a reference to pointer or array", t); remove = true; } else if (bitmap_bit_p (&aligned_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in %<aligned%> clauses", t); remove = true; } else bitmap_set_bit (&aligned_head, DECL_UID (t)); t = OMP_CLAUSE_ALIGNED_ALIGNMENT (c); if (t == error_mark_node) remove = true; else if (t == NULL_TREE) break; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<aligned%> clause alignment expression must " "be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error_at (OMP_CLAUSE_LOCATION (c), "%<aligned%> clause alignment expression must " "be positive constant integer expression"); remove = true; } else t = 
fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = t; } break; case OMP_CLAUSE_NONTEMPORAL: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %<nontemporal%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %<nontemporal%> clause", t); remove = true; } else if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in %<nontemporal%> " "clauses", t); remove = true; } else bitmap_set_bit (&oacc_reduction_head, DECL_UID (t)); break; case OMP_CLAUSE_DEPEND: t = OMP_CLAUSE_DECL (c); if (t == NULL_TREE) { gcc_assert (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE); break; } if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) { if (cp_finish_omp_clause_depend_sink (c)) remove = true; break; } if (TREE_CODE (t) == TREE_LIST && TREE_PURPOSE (t) && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC) { if (TREE_PURPOSE (t) != last_iterators) last_iterators_remove = cp_omp_finish_iterators (TREE_PURPOSE (t)); last_iterators = TREE_PURPOSE (t); t = TREE_VALUE (t); if (last_iterators_remove) t = error_mark_node; } else last_iterators = NULL_TREE; if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ) { error_at (OMP_CLAUSE_LOCATION (c), "%<depend%> clause with %<depobj%> dependence " "type on array section"); remove = true; } break; } if (t == error_mark_node) remove = true; else if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; } else if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; else if (!lvalue_p (t)) { if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION 
(c), "%qD is not lvalue expression nor array section " "in %<depend%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not lvalue expression nor array section " "in %<depend%> clause", t); remove = true; } else if (TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL && DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, "depend"); remove = true; } else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ) { if (!c_omp_depend_t_p (TYPE_REF_P (TREE_TYPE (t)) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have %<omp_depend_t%> type in " "%<depend%> clause with %<depobj%> dependence " "type", t); remove = true; } } else if (c_omp_depend_t_p (TYPE_REF_P (TREE_TYPE (t)) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE should not have %<omp_depend_t%> type in " "%<depend%> clause with dependence type other than " "%<depobj%>", t); remove = true; } if (!remove) { tree addr = cp_build_addr_expr (t, tf_warning_or_error); if (addr == error_mark_node) remove = true; else { t = cp_build_indirect_ref (OMP_CLAUSE_LOCATION (c), addr, RO_UNARY_STAR, tf_warning_or_error); if (t == error_mark_node) remove = true; else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == TREE_LIST && TREE_PURPOSE (OMP_CLAUSE_DECL (c)) && (TREE_CODE (TREE_PURPOSE (OMP_CLAUSE_DECL (c))) == TREE_VEC)) TREE_VALUE (OMP_CLAUSE_DECL (c)) = t; else OMP_CLAUSE_DECL (c) = t; } } break; case OMP_CLAUSE_MAP: case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; else { t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != TREE_LIST && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "array section does not have mappable type " "in %qs clause", 
omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } while (TREE_CODE (t) == ARRAY_REF) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { while (TREE_CODE (t) == COMPONENT_REF) t = TREE_OPERAND (t, 0); if (REFERENCE_REF_P (t)) t = TREE_OPERAND (t, 0); if (bitmap_bit_p (&map_field_head, DECL_UID (t))) break; if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in motion" " clauses", t); else if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data" " clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in map" " clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); bitmap_set_bit (&map_field_head, DECL_UID (t)); } } } if (cp_oacc_check_attachments (c)) remove = true; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH)) /* In this case, we have a single array element which is a pointer, and we already set OMP_CLAUSE_SIZE in handle_omp_array_sections above. For attach/detach clauses, reset the OMP_CLAUSE_SIZE (representing a bias) to zero here. */ OMP_CLAUSE_SIZE (c) = size_zero_node; break; } if (t == error_mark_node) { remove = true; break; } /* OpenACC attach / detach clauses must be pointers. */ if (cp_oacc_check_attachments (c)) { remove = true; break; } if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH)) /* For attach/detach clauses, set OMP_CLAUSE_SIZE (representing a bias) to zero here, so it is not set erroneously to the pointer size later on in gimplify.c. 
*/ OMP_CLAUSE_SIZE (c) = size_zero_node; if (REFERENCE_REF_P (t) && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF) { t = TREE_OPERAND (t, 0); OMP_CLAUSE_DECL (c) = t; } if (ort == C_ORT_ACC && TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (t, 0)) == INDIRECT_REF) t = TREE_OPERAND (TREE_OPERAND (t, 0), 0); if (TREE_CODE (t) == COMPONENT_REF && ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP || ort == C_ORT_ACC) && OMP_CLAUSE_CODE (c) != OMP_CLAUSE__CACHE_) { if (type_dependent_expression_p (t)) break; if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL && DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } while (TREE_CODE (t) == COMPONENT_REF) { if (TREE_TYPE (TREE_OPERAND (t, 0)) && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is a member of a union", t); remove = true; break; } t = TREE_OPERAND (t, 0); } if (remove) break; if (REFERENCE_REF_P (t)) t = TREE_OPERAND (t, 0); if (VAR_P (t) || TREE_CODE (t) == PARM_DECL) { if (bitmap_bit_p (&map_field_head, DECL_UID (t))) goto handle_map_references; } } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %qs clause", t, 
omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } else if (!processing_template_decl && !TYPE_REF_P (TREE_TYPE (t)) && (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP || (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER)) && !cxx_mark_addressable (t)) remove = true; else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER))) && t == OMP_CLAUSE_DECL (c) && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TYPE_REF_P (TREE_TYPE (t)) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FORCE_DEVICEPTR && !type_dependent_expression_p (t) && !INDIRECT_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a pointer variable", t); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) { if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map 
clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); } else if (bitmap_bit_p (&map_head, DECL_UID (t)) && (ort != C_ORT_ACC || !bitmap_bit_p (&map_field_head, DECL_UID (t)))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in motion clauses", t); if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in map clauses", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); if (t != OMP_CLAUSE_DECL (c) && TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF) bitmap_set_bit (&map_field_head, DECL_UID (t)); } handle_map_references: if (!remove && !processing_template_decl && ort != C_ORT_DECLARE_SIMD && TYPE_REF_P (TREE_TYPE (OMP_CLAUSE_DECL (c)))) { t = OMP_CLAUSE_DECL (c); if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) { OMP_CLAUSE_DECL (c) = build_simple_mem_ref (t); if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))); } else if (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER && (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_REFERENCE) && (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER)) { tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); if (TREE_CODE (t) == COMPONENT_REF) { gomp_map_kind k = (ort == C_ORT_ACC) ? 
GOMP_MAP_ATTACH_DETACH : GOMP_MAP_ALWAYS_POINTER; OMP_CLAUSE_SET_MAP_KIND (c2, k); } else OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_REFERENCE); OMP_CLAUSE_DECL (c2) = t; OMP_CLAUSE_SIZE (c2) = size_zero_node; OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = c2; OMP_CLAUSE_DECL (c) = build_simple_mem_ref (t); if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))); c = c2; } } break; case OMP_CLAUSE_TO_DECLARE: case OMP_CLAUSE_LINK: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == FUNCTION_DECL && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) ; else if (!VAR_P (t)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) { if (TREE_CODE (t) == TEMPLATE_ID_EXPR) error_at (OMP_CLAUSE_LOCATION (c), "template %qE in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else if (really_overloaded_fn (t)) error_at (OMP_CLAUSE_LOCATION (c), "overloaded function name %qE in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is neither a variable nor a function name " "in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); } else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } if (remove) break; if (bitmap_bit_p (&generic_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once on the same " "%<declare target%> directive", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; 
case OMP_CLAUSE_UNIFORM: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not an argument in %<uniform%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not an argument in %<uniform%> clause", t); remove = true; break; } /* map_head bitmap is used as uniform_head if declare_simd. */ bitmap_set_bit (&map_head, DECL_UID (t)); goto check_dup_generic; case OMP_CLAUSE_GRAINSIZE: t = OMP_CLAUSE_GRAINSIZE_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<grainsize%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<grainsize%> value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_GRAINSIZE_EXPR (c) = t; } break; case OMP_CLAUSE_PRIORITY: t = OMP_CLAUSE_PRIORITY_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<priority%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) == -1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<priority%> value must be non-negative"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_PRIORITY_EXPR (c) = t; } break; case OMP_CLAUSE_HINT: t = OMP_CLAUSE_HINT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<hint%> expression must 
be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); if (TREE_CODE (t) != INTEGER_CST) { error_at (OMP_CLAUSE_LOCATION (c), "%<hint%> expression must be constant integer " "expression"); remove = true; } } OMP_CLAUSE_HINT_EXPR (c) = t; } break; case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_PTR: field_ok = (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP; t = OMP_CLAUSE_DECL (c); if (!type_dependent_expression_p (t)) { tree type = TREE_TYPE (t); if (!TYPE_PTR_P (type) && (!TYPE_REF_P (type) || !TYPE_PTR_P (TREE_TYPE (type)))) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR && ort == C_ORT_OMP) { error_at (OMP_CLAUSE_LOCATION (c), "%qs variable is neither a pointer " "nor reference to pointer", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TREE_CODE (type) != ARRAY_TYPE && (!TYPE_REF_P (type) || TREE_CODE (TREE_TYPE (type)) != ARRAY_TYPE)) { error_at (OMP_CLAUSE_LOCATION (c), "%qs variable is neither a pointer, nor an " "array nor reference to pointer or array", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } } goto check_dup_generic; case OMP_CLAUSE_USE_DEVICE_ADDR: field_ok = true; t = OMP_CLAUSE_DECL (c); if (!processing_template_decl && (VAR_P (t) || TREE_CODE (t) == PARM_DECL) && !TYPE_REF_P (TREE_TYPE (t)) && !cxx_mark_addressable (t)) remove = true; goto check_dup_generic; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_FOR: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_DEVICE_TYPE: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_BIND: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_IF_PRESENT: case 
OMP_CLAUSE_FINALIZE: break; case OMP_CLAUSE_TILE: for (tree list = OMP_CLAUSE_TILE_LIST (c); !remove && list; list = TREE_CHAIN (list)) { t = TREE_VALUE (list); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<tile%> argument needs integral type"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { /* Zero is used to indicate '*', we permit you to get there via an ICE of value zero. */ t = maybe_constant_value (t); if (!tree_fits_shwi_p (t) || tree_to_shwi (t) < 0) { error_at (OMP_CLAUSE_LOCATION (c), "%<tile%> argument needs positive " "integral constant"); remove = true; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } } /* Update list item. */ TREE_VALUE (list) = t; } break; case OMP_CLAUSE_ORDERED: ordered_seen = true; break; case OMP_CLAUSE_ORDER: if (order_seen) remove = true; else order_seen = true; break; case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: if (branch_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<inbranch%> clause is incompatible with " "%<notinbranch%>"); remove = true; } branch_seen = true; break; case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (!t) t = OMP_CLAUSE_DECL (c); if (t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && TREE_CODE (t) != FIELD_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } break; default: gcc_unreachable (); } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = 
&OMP_CLAUSE_CHAIN (c); } if (reduction_seen < 0 && (ordered_seen || schedule_seen)) reduction_seen = -2; for (pc = &clauses, c = clauses; c ; c = *pc) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); bool remove = false; bool need_complete_type = false; bool need_default_ctor = false; bool need_copy_ctor = false; bool need_copy_assignment = false; bool need_implicitly_determined = false; bool need_dtor = false; tree type, inner_type; switch (c_kind) { case OMP_CLAUSE_SHARED: need_implicitly_determined = true; break; case OMP_CLAUSE_PRIVATE: need_complete_type = true; need_default_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_FIRSTPRIVATE: need_complete_type = true; need_copy_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_LASTPRIVATE: need_complete_type = true; need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_REDUCTION: if (reduction_seen == -2) OMP_CLAUSE_REDUCTION_INSCAN (c) = 0; if (OMP_CLAUSE_REDUCTION_INSCAN (c)) need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: need_implicitly_determined = true; break; case OMP_CLAUSE_LINEAR: if (ort != C_ORT_OMP_DECLARE_SIMD) need_implicitly_determined = true; else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) && !bitmap_bit_p (&map_head, DECL_UID (OMP_CLAUSE_LINEAR_STEP (c)))) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause step is a parameter %qD not " "specified in %<uniform%> clause", OMP_CLAUSE_LINEAR_STEP (c)); *pc = OMP_CLAUSE_CHAIN (c); continue; } break; case OMP_CLAUSE_COPYPRIVATE: need_copy_assignment = true; break; case OMP_CLAUSE_COPYIN: need_copy_assignment = true; break; case OMP_CLAUSE_SIMDLEN: if (safelen && !processing_template_decl && tree_int_cst_lt (OMP_CLAUSE_SAFELEN_EXPR (safelen), OMP_CLAUSE_SIMDLEN_EXPR (c))) { error_at 
(OMP_CLAUSE_LOCATION (c), "%<simdlen%> clause value is bigger than " "%<safelen%> clause value"); OMP_CLAUSE_SIMDLEN_EXPR (c) = OMP_CLAUSE_SAFELEN_EXPR (safelen); } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_SCHEDULE: if (ordered_seen && (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)) { error_at (OMP_CLAUSE_LOCATION (c), "%<nonmonotonic%> schedule modifier specified " "together with %<ordered%> clause"); OMP_CLAUSE_SCHEDULE_KIND (c) = (enum omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (c) & ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC); } if (reduction_seen == -2) error_at (OMP_CLAUSE_LOCATION (c), "%qs clause specified together with %<inscan%> " "%<reduction%> clause", "schedule"); pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_NOGROUP: if (reduction_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nogroup%> clause must not be used together with " "%<reduction%> clause"); *pc = OMP_CLAUSE_CHAIN (c); continue; } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_ORDERED: if (reduction_seen == -2) error_at (OMP_CLAUSE_LOCATION (c), "%qs clause specified together with %<inscan%> " "%<reduction%> clause", "ordered"); pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_ORDER: if (ordered_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<order%> clause must not be used together " "with %<ordered%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_NOWAIT: if (copyprivate_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } /* FALLTHRU */ default: pc = &OMP_CLAUSE_CHAIN (c); continue; } t = OMP_CLAUSE_DECL (c); if (processing_template_decl && !VAR_P (t) && TREE_CODE (t) != PARM_DECL) { pc = &OMP_CLAUSE_CHAIN (c); continue; } switch (c_kind) { case OMP_CLAUSE_LASTPRIVATE: if (!bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { need_default_ctor = true; need_dtor = true; } break; case 
OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: if (finish_omp_reduction_clause (c, &need_default_ctor, &need_dtor)) remove = true; else t = OMP_CLAUSE_DECL (c); break; case OMP_CLAUSE_COPYIN: if (!VAR_P (t) || !CP_DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE must be %<threadprivate%> for %<copyin%>", t); remove = true; } break; default: break; } if (need_complete_type || need_copy_assignment) { t = require_complete_type (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl && TYPE_REF_P (TREE_TYPE (t)) && !complete_type_or_else (TREE_TYPE (TREE_TYPE (t)), t)) remove = true; } if (need_implicitly_determined) { const char *share_name = NULL; if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t)) share_name = "threadprivate"; else switch (cxx_omp_predetermined_sharing_1 (t)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE) && c_omp_predefined_variable (t)) /* The __func__ variable and similar function-local predefined variables may be listed in a shared or firstprivate clause. */ break; if (VAR_P (t) && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && TREE_STATIC (t) && cxx_omp_const_qual_no_mutable (t)) { tree ctx = CP_DECL_CONTEXT (t); /* const qualified static data members without mutable member may be specified in firstprivate clause. 
*/ if (TYPE_P (ctx) && MAYBE_CLASS_TYPE_P (ctx)) break; } share_name = "shared"; break; case OMP_CLAUSE_DEFAULT_PRIVATE: share_name = "private"; break; default: gcc_unreachable (); } if (share_name) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is predetermined %qs for %qs", omp_clause_printable_decl (t), share_name, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE && cxx_omp_const_qual_no_mutable (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%<const%> qualified %qE without %<mutable%> member " "may appear only in %<shared%> or %<firstprivate%> " "clauses", omp_clause_printable_decl (t)); remove = true; } } /* We're interested in the base element, not arrays. */ inner_type = type = TREE_TYPE (t); if ((need_complete_type || need_copy_assignment || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) && TYPE_REF_P (inner_type)) inner_type = TREE_TYPE (inner_type); while (TREE_CODE (inner_type) == ARRAY_TYPE) inner_type = TREE_TYPE (inner_type); /* Check for special function availability by building a call to one. Save the results, because later we won't be in the right context for making these queries. 
*/
      if (CLASS_TYPE_P (inner_type)
	  && COMPLETE_TYPE_P (inner_type)
	  && (need_default_ctor || need_copy_ctor
	      || need_copy_assignment || need_dtor)
	  && !type_dependent_expression_p (t)
	  && cxx_omp_create_clause_info (c, inner_type, need_default_ctor,
					 need_copy_ctor, need_copy_assignment,
					 need_dtor))
	remove = true;

      /* In a template, a shared clause on a non-static data member is
	 rewritten to refer to the underlying field decl so that later
	 instantiation resolves the member correctly.  */
      if (!remove && c_kind == OMP_CLAUSE_SHARED && processing_template_decl)
	{
	  t = omp_clause_decl_field (OMP_CLAUSE_DECL (c));
	  if (t)
	    OMP_CLAUSE_DECL (c) = t;
	}

      /* Unlink the clause from the chain if it was diagnosed above,
	 otherwise advance to the next one.  */
      if (remove)
	*pc = OMP_CLAUSE_CHAIN (c);
      else
	pc = &OMP_CLAUSE_CHAIN (c);
    }

  bitmap_obstack_release (NULL);
  return clauses;
}

/* Start processing OpenMP clauses that can include any privatization
   clauses for non-static data members.  Returns the statement list to
   be passed to pop_omp_privatization_clauses, or NULL_TREE when the
   previous call asked for the next push to be ignored.  */

tree
push_omp_privatization_clauses (bool ignore_next)
{
  if (omp_private_member_ignore_next)
    {
      omp_private_member_ignore_next = ignore_next;
      return NULL_TREE;
    }
  omp_private_member_ignore_next = ignore_next;
  if (omp_private_member_map)
    /* Push a sentinel so the matching pop knows where this nesting
       level's member remappings begin.  */
    omp_private_member_vec.safe_push (error_mark_node);
  return push_stmt_list ();
}

/* Revert remapping of any non-static data members since
   the last push_omp_privatization_clauses () call.  STMT is the
   statement list returned by that call (NULL_TREE means nothing
   to do).  */

void
pop_omp_privatization_clauses (tree stmt)
{
  if (stmt == NULL_TREE)
    return;
  stmt = pop_stmt_list (stmt);
  if (omp_private_member_map)
    {
      while (!omp_private_member_vec.is_empty ())
	{
	  tree t = omp_private_member_vec.pop ();
	  /* error_mark_node is the sentinel pushed for the enclosing
	     nesting level; stop there.  */
	  if (t == error_mark_node)
	    {
	      add_stmt (stmt);
	      return;
	    }
	  /* integer_zero_node preceding an entry means its DECL_EXPR
	     must not be re-emitted.  */
	  bool no_decl_expr = t == integer_zero_node;
	  if (no_decl_expr)
	    t = omp_private_member_vec.pop ();
	  tree *v = omp_private_member_map->get (t);
	  gcc_assert (v);
	  if (!no_decl_expr)
	    add_decl_expr (*v);
	  omp_private_member_map->remove (t);
	}
      delete omp_private_member_map;
      omp_private_member_map = NULL;
    }
  add_stmt (stmt);
}

/* Remember OpenMP privatization clauses mapping and clear it.
   Used for lambdas.
*/

void
save_omp_privatization_clauses (vec<tree> &save)
{
  save = vNULL;
  /* integer_one_node at the front records the pending ignore_next
     flag; restore_omp_privatization_clauses checks for it.  */
  if (omp_private_member_ignore_next)
    save.safe_push (integer_one_node);
  omp_private_member_ignore_next = false;
  if (!omp_private_member_map)
    return;
  while (!omp_private_member_vec.is_empty ())
    {
      tree t = omp_private_member_vec.pop ();
      if (t == error_mark_node)
	{
	  /* Nesting-level sentinel: saved verbatim.  */
	  save.safe_push (t);
	  continue;
	}
      tree n = t;
      if (t == integer_zero_node)
	t = omp_private_member_vec.pop ();
      tree *v = omp_private_member_map->get (t);
      gcc_assert (v);
      /* Save mapped value, key, and (if present) the zero marker, in
	 that order, so the restore loop can pop them back.  */
      save.safe_push (*v);
      save.safe_push (t);
      if (n != t)
	save.safe_push (n);
    }
  delete omp_private_member_map;
  omp_private_member_map = NULL;
}

/* Restore OpenMP privatization clauses mapping saved by the
   above function.  Releases SAVE when done.  */

void
restore_omp_privatization_clauses (vec<tree> &save)
{
  gcc_assert (omp_private_member_vec.is_empty ());
  omp_private_member_ignore_next = false;
  if (save.is_empty ())
    return;
  if (save.length () == 1
      && save[0] == integer_one_node)
    {
      omp_private_member_ignore_next = true;
      save.release ();
      return;
    }
  omp_private_member_map = new hash_map <tree, tree>;
  while (!save.is_empty ())
    {
      tree t = save.pop ();
      tree n = t;
      if (t != error_mark_node)
	{
	  /* integer_one_node is the ignore_next flag and is always
	     last in SAVE (first pushed by the save function).  */
	  if (t == integer_one_node)
	    {
	      omp_private_member_ignore_next = true;
	      gcc_assert (save.is_empty ());
	      break;
	    }
	  if (t == integer_zero_node)
	    t = save.pop ();
	  tree &v = omp_private_member_map->get_or_insert (t);
	  v = save.pop ();
	}
      omp_private_member_vec.safe_push (t);
      if (n != t)
	omp_private_member_vec.safe_push (n);
    }
  save.release ();
}

/* For all variables in the tree_list VARS, mark them as thread local.  */

void
finish_omp_threadprivate (tree vars)
{
  tree t;

  /* Mark every variable in VARS to be assigned thread local storage.
*/
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);

      /* Diagnose invalid threadprivate candidates; an erroneous
	 operand has already been reported, so stay silent.  */
      if (error_operand_p (v))
	;
      else if (!VAR_P (v))
	error ("%<threadprivate%> %qD is not file, namespace "
	       "or block scope variable", v);
      /* If V had already been marked threadprivate, it doesn't matter
	 whether it had been used prior to this point.  */
      else if (TREE_USED (v)
	       && (DECL_LANG_SPECIFIC (v) == NULL
		   || !CP_DECL_THREADPRIVATE_P (v)))
	error ("%qE declared %<threadprivate%> after first use", v);
      else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v))
	error ("automatic variable %qE cannot be %<threadprivate%>", v);
      else if (! COMPLETE_TYPE_P (complete_type (TREE_TYPE (v))))
	error ("%<threadprivate%> %qE has incomplete type", v);
      else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v))
	       && CP_DECL_CONTEXT (v) != current_class_type)
	error ("%<threadprivate%> %qE directive not "
	       "in %qT definition", v, CP_DECL_CONTEXT (v));
      else
	{
	  /* Allocate a LANG_SPECIFIC structure for V, if needed.  */
	  if (DECL_LANG_SPECIFIC (v) == NULL)
	    retrofit_lang_decl (v);

	  if (! CP_DECL_THREAD_LOCAL_P (v))
	    {
	      CP_DECL_THREAD_LOCAL_P (v) = true;
	      set_decl_tls_model (v, decl_default_tls_model (v));
	      /* If rtl has been already set for this var, call
		 make_decl_rtl once again, so that encode_section_info
		 has a chance to look at the new decl flags.  */
	      if (DECL_RTL_SET_P (v))
		make_decl_rtl (v);
	    }
	  CP_DECL_THREADPRIVATE_P (v) = 1;
	}
    }
}

/* Build an OpenMP structured block.  */

tree
begin_omp_structured_block (void)
{
  return do_pushlevel (sk_omp);
}

/* Close an OpenMP structured block opened by
   begin_omp_structured_block and return its BLOCK.  */

tree
finish_omp_structured_block (tree block)
{
  return do_poplevel (block);
}

/* Similarly, except force the retention of the BLOCK.  */

tree
begin_omp_parallel (void)
{
  keep_next_level (true);
  return begin_omp_structured_block ();
}

/* Generate OACC_DATA, with CLAUSES and BLOCK as its compound statement.
*/

tree
finish_oacc_data (tree clauses, tree block)
{
  tree stmt;

  block = finish_omp_structured_block (block);

  stmt = make_node (OACC_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OACC_DATA_CLAUSES (stmt) = clauses;
  OACC_DATA_BODY (stmt) = block;

  return add_stmt (stmt);
}

/* Generate OACC_HOST_DATA, with CLAUSES and BLOCK as its compound
   statement.  */

tree
finish_oacc_host_data (tree clauses, tree block)
{
  tree stmt;

  block = finish_omp_structured_block (block);

  stmt = make_node (OACC_HOST_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OACC_HOST_DATA_CLAUSES (stmt) = clauses;
  OACC_HOST_DATA_BODY (stmt) = block;

  return add_stmt (stmt);
}

/* Generate OMP construct CODE, with BODY and CLAUSES as its compound
   statement.  */

tree
finish_omp_construct (enum tree_code code, tree body, tree clauses)
{
  body = finish_omp_structured_block (body);

  tree stmt = make_node (code);
  TREE_TYPE (stmt) = void_type_node;
  OMP_BODY (stmt) = body;
  OMP_CLAUSES (stmt) = clauses;

  return add_stmt (stmt);
}

/* Build an OMP_PARALLEL statement from CLAUSES and BODY and add it to
   the current statement list.  */

tree
finish_omp_parallel (tree clauses, tree body)
{
  tree stmt;

  body = finish_omp_structured_block (body);

  stmt = make_node (OMP_PARALLEL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_PARALLEL_CLAUSES (stmt) = clauses;
  OMP_PARALLEL_BODY (stmt) = body;

  return add_stmt (stmt);
}

/* Begin the structured block of an OMP_TASK, forcing retention of
   its BLOCK like begin_omp_parallel does.  */

tree
begin_omp_task (void)
{
  keep_next_level (true);
  return begin_omp_structured_block ();
}

/* Build an OMP_TASK statement from CLAUSES and BODY and add it to the
   current statement list.  */

tree
finish_omp_task (tree clauses, tree body)
{
  tree stmt;

  body = finish_omp_structured_block (body);

  stmt = make_node (OMP_TASK);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TASK_CLAUSES (stmt) = clauses;
  OMP_TASK_BODY (stmt) = body;

  return add_stmt (stmt);
}

/* Helper function for finish_omp_for.  Convert Ith random access iterator
   into integral iterator.  Return FALSE if successful.
*/

static bool
handle_omp_for_class_iterator (int i, location_t locus, enum tree_code code,
			       tree declv, tree orig_declv, tree initv,
			       tree condv, tree incrv, tree *body,
			       tree *pre_body, tree &clauses,
			       int collapse, int ordered)
{
  tree diff, iter_init, iter_incr = NULL, last;
  tree incr_var = NULL, orig_pre_body, orig_body, c;
  tree decl = TREE_VEC_ELT (declv, i);
  tree init = TREE_VEC_ELT (initv, i);
  tree cond = TREE_VEC_ELT (condv, i);
  tree incr = TREE_VEC_ELT (incrv, i);
  tree iter = decl;
  location_t elocus = locus;

  if (init && EXPR_HAS_LOCATION (init))
    elocus = EXPR_LOCATION (init);

  /* Validate the controlling predicate and canonicalize it so the
     iterator is the left-hand operand.  */
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      if (TREE_OPERAND (cond, 1) == iter)
	cond = build2 (swap_tree_comparison (TREE_CODE (cond)),
		       TREE_TYPE (cond), iter, TREE_OPERAND (cond, 0));
      if (TREE_OPERAND (cond, 0) != iter)
	cond = error_mark_node;
      else
	{
	  tree tem = build_x_binary_op (EXPR_LOCATION (cond),
					TREE_CODE (cond),
					iter, ERROR_MARK,
					TREE_OPERAND (cond, 1), ERROR_MARK,
					NULL, tf_warning_or_error);
	  if (error_operand_p (tem))
	    return true;
	}
      break;
    default:
      cond = error_mark_node;
      break;
    }
  if (cond == error_mark_node)
    {
      error_at (elocus, "invalid controlling predicate");
      return true;
    }
  /* DIFF is the (integral) iteration count: bound - iter.  */
  diff = build_x_binary_op (elocus, MINUS_EXPR, TREE_OPERAND (cond, 1),
			    ERROR_MARK, iter, ERROR_MARK, NULL,
			    tf_warning_or_error);
  diff = cp_fully_fold (diff);
  if (error_operand_p (diff))
    return true;
  if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE)
    {
      error_at (elocus,
		"difference between %qE and %qD does not have integer type",
		TREE_OPERAND (cond, 1), iter);
      return true;
    }
  if (!c_omp_check_loop_iv_exprs (locus, orig_declv, i,
				  TREE_VEC_ELT (declv, i), NULL_TREE,
				  cond, cp_walk_subtrees))
    return true;

  /* Validate the increment expression; on success INCR becomes the
     integral step and ITER_INCR (if non-NULL) the expression that
     advances the class iterator itself.  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
	{
	  incr = error_mark_node;
	  break;
	}
      iter_incr = build_x_unary_op (EXPR_LOCATION (incr), TREE_CODE (incr),
				    iter, tf_warning_or_error);
      if (error_operand_p (iter_incr))
	return true;
      else if (TREE_CODE (incr) == PREINCREMENT_EXPR
	       || TREE_CODE (incr) == POSTINCREMENT_EXPR)
	incr = integer_one_node;
      else
	incr = integer_minus_one_node;
      break;
    case MODIFY_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
	incr = error_mark_node;
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	       || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree rhs = TREE_OPERAND (incr, 1);
	  if (TREE_OPERAND (rhs, 0) == iter)
	    {
	      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1)))
		  != INTEGER_TYPE)
		incr = error_mark_node;
	      else
		{
		  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
						   iter, TREE_CODE (rhs),
						   TREE_OPERAND (rhs, 1),
						   tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  incr = TREE_OPERAND (rhs, 1);
		  incr = cp_convert (TREE_TYPE (diff), incr,
				     tf_warning_or_error);
		  if (TREE_CODE (rhs) == MINUS_EXPR)
		    {
		      incr = build1 (NEGATE_EXPR, TREE_TYPE (diff), incr);
		      incr = fold_simple (incr);
		    }
		  /* Only keep ITER_INCR for constant steps.  */
		  if (TREE_CODE (incr) != INTEGER_CST
		      && (TREE_CODE (incr) != NOP_EXPR
			  || (TREE_CODE (TREE_OPERAND (incr, 0))
			      != INTEGER_CST)))
		    iter_incr = NULL;
		}
	    }
	  else if (TREE_OPERAND (rhs, 1) == iter)
	    {
	      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0)))
		  != INTEGER_TYPE
		  || TREE_CODE (rhs) != PLUS_EXPR)
		incr = error_mark_node;
	      else
		{
		  iter_incr = build_x_binary_op (EXPR_LOCATION (rhs),
						 PLUS_EXPR,
						 TREE_OPERAND (rhs, 0),
						 ERROR_MARK, iter,
						 ERROR_MARK, NULL,
						 tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
						   iter, NOP_EXPR,
						   iter_incr,
						   tf_warning_or_error);
		  if (error_operand_p (iter_incr))
		    return true;
		  incr = TREE_OPERAND (rhs, 0);
		  iter_incr = NULL;
		}
	    }
	  else
	    incr = error_mark_node;
	}
      else
	incr = error_mark_node;
      break;
    default:
      incr = error_mark_node;
      break;
    }

  if (incr == error_mark_node)
    {
      error_at (elocus, "invalid increment expression");
      return true;
    }

  incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error);
  incr = cp_fully_fold (incr);

  /* Find a lastprivate (or, for taskloop/loop, private) clause naming
     the iterator, if any.  C keeps the lastprivate clause (or NULL).  */
  tree loop_iv_seen = NULL_TREE;
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	&& OMP_CLAUSE_DECL (c) == iter)
      {
	if (code == OMP_TASKLOOP || code == OMP_LOOP)
	  {
	    loop_iv_seen = c;
	    OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c) = 1;
	  }
	break;
      }
    else if ((code == OMP_TASKLOOP || code == OMP_LOOP)
	     && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
	     && OMP_CLAUSE_DECL (c) == iter)
      {
	loop_iv_seen = c;
	if (code == OMP_TASKLOOP)
	  OMP_CLAUSE_PRIVATE_TASKLOOP_IV (c) = 1;
      }

  /* DECL becomes the synthesized integral loop variable; LAST records
     its most recent value so the body can replay the iterator.  */
  decl = create_temporary_var (TREE_TYPE (diff));
  pushdecl (decl);
  add_decl_expr (decl);
  last = create_temporary_var (TREE_TYPE (diff));
  pushdecl (last);
  add_decl_expr (last);
  if (c && iter_incr == NULL && TREE_CODE (incr) != INTEGER_CST
      && (!ordered || (i < collapse && collapse > 1)))
    {
      incr_var = create_temporary_var (TREE_TYPE (diff));
      pushdecl (incr_var);
      add_decl_expr (incr_var);
    }
  gcc_assert (stmts_are_full_exprs_p ());
  tree diffvar = NULL_TREE;
  if (code == OMP_TASKLOOP)
    {
      if (!loop_iv_seen)
	{
	  tree ivc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (ivc) = iter;
	  cxx_omp_finish_clause (ivc, NULL, false);
	  OMP_CLAUSE_CHAIN (ivc) = clauses;
	  clauses = ivc;
	}
      tree lvc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE);
      OMP_CLAUSE_DECL (lvc) = last;
      OMP_CLAUSE_CHAIN (lvc) = clauses;
      clauses = lvc;
      diffvar = create_temporary_var (TREE_TYPE (diff));
      pushdecl (diffvar);
      add_decl_expr (diffvar);
    }
  else if (code == OMP_LOOP)
    {
      if (!loop_iv_seen)
	{
	  /* While iterators on the loop construct are predetermined
	     lastprivate, if the decl is not declared inside of the
	     loop, OMP_CLAUSE_LASTPRIVATE should have been added
	     already.  */
	  loop_iv_seen = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE);
	  OMP_CLAUSE_DECL (loop_iv_seen) = iter;
	  OMP_CLAUSE_CHAIN (loop_iv_seen) = clauses;
	  clauses = loop_iv_seen;
	}
      else if (OMP_CLAUSE_CODE (loop_iv_seen) == OMP_CLAUSE_PRIVATE)
	{
	  OMP_CLAUSE_PRIVATE_DEBUG (loop_iv_seen) = 0;
	  OMP_CLAUSE_PRIVATE_OUTER_REF (loop_iv_seen) = 0;
	  OMP_CLAUSE_CODE (loop_iv_seen) = OMP_CLAUSE_FIRSTPRIVATE;
	}
      if (OMP_CLAUSE_CODE (loop_iv_seen) == OMP_CLAUSE_FIRSTPRIVATE)
	cxx_omp_finish_clause (loop_iv_seen, NULL, false);
    }

  /* Build the pre-body: initialize the class iterator, then DECL and
     LAST to 0.  */
  orig_pre_body = *pre_body;
  *pre_body = push_stmt_list ();
  if (orig_pre_body)
    add_stmt (orig_pre_body);
  if (init != NULL)
    finish_expr_stmt (build_x_modify_expr (elocus, iter, NOP_EXPR, init,
					   tf_warning_or_error));
  init = build_int_cst (TREE_TYPE (diff), 0);
  if (c && iter_incr == NULL
      && (!ordered || (i < collapse && collapse > 1)))
    {
      if (incr_var)
	{
	  finish_expr_stmt (build_x_modify_expr (elocus, incr_var, NOP_EXPR,
						 incr, tf_warning_or_error));
	  incr = incr_var;
	}
      iter_incr = build_x_modify_expr (elocus, iter, PLUS_EXPR, incr,
				       tf_warning_or_error);
    }
  if (c && ordered && i < collapse && collapse > 1)
    iter_incr = incr;
  finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, init,
					 tf_warning_or_error));
  if (diffvar)
    {
      finish_expr_stmt (build_x_modify_expr (elocus, diffvar, NOP_EXPR,
					     diff, tf_warning_or_error));
      diff = diffvar;
    }
  *pre_body = pop_stmt_list (*pre_body);

  /* The rewritten loop compares and advances DECL instead of the
     class iterator.  */
  cond = cp_build_binary_op (elocus, TREE_CODE (cond), decl, diff,
			     tf_warning_or_error);
  incr = build_modify_expr (elocus, decl, NULL_TREE, PLUS_EXPR,
			    elocus, incr, NULL_TREE);

  /* Prepend to the body: iter += decl - last; last = decl;  */
  orig_body = *body;
  *body = push_stmt_list ();
  iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last);
  iter_init = build_x_modify_expr (elocus, iter, PLUS_EXPR, iter_init,
				   tf_warning_or_error);
  if (iter_init != error_mark_node)
    iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
  finish_expr_stmt (iter_init);
  finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, decl,
					 tf_warning_or_error));
  add_stmt (orig_body);
  *body = pop_stmt_list (*body);

  if (c)
    {
      /* Fill in the lastprivate update statement for the clause.  */
      OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list ();
      if (!ordered)
	finish_expr_stmt (iter_incr);
      else
	{
	  iter_init = decl;
	  if (i < collapse && collapse > 1 && !error_operand_p (iter_incr))
	    iter_init = build2 (PLUS_EXPR, TREE_TYPE (diff),
				iter_init, iter_incr);
	  iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), iter_init, last);
	  iter_init = build_x_modify_expr (elocus, iter, PLUS_EXPR,
					   iter_init, tf_warning_or_error);
	  if (iter_init != error_mark_node)
	    iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
	  finish_expr_stmt (iter_init);
	}
      OMP_CLAUSE_LASTPRIVATE_STMT (c)
	= pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c));
    }

  /* Record the original iterator and LAST in ORIG_DECLV, and install
     the synthesized integral loop into the vectors.  */
  if (TREE_CODE (TREE_VEC_ELT (orig_declv, i)) == TREE_LIST)
    {
      tree t = TREE_VEC_ELT (orig_declv, i);
      gcc_assert (TREE_PURPOSE (t) == NULL_TREE
		  && TREE_VALUE (t) == NULL_TREE
		  && TREE_CODE (TREE_CHAIN (t)) == TREE_VEC);
      TREE_PURPOSE (t) = TREE_VEC_ELT (declv, i);
      TREE_VALUE (t) = last;
    }
  else
    TREE_VEC_ELT (orig_declv, i)
      = tree_cons (TREE_VEC_ELT (declv, i), last, NULL_TREE);
  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;

  return false;
}

/* Build and validate an OMP_FOR statement.  CLAUSES, BODY, COND, INCR
   are directly for their associated operands in the statement.  DECL
   and INIT are a combo; if DECL is NULL then INIT ought to be a
   MODIFY_EXPR, and the DECL should be extracted.  PRE_BODY are
   optional statements that need to go before the loop into its
   sk_omp scope.
*/

tree
finish_omp_for (location_t locus, enum tree_code code, tree declv,
		tree orig_declv, tree initv, tree condv, tree incrv,
		tree body, tree pre_body, vec<tree> *orig_inits, tree clauses)
{
  tree omp_for = NULL, orig_incr = NULL;
  tree decl = NULL, init, cond, incr;
  location_t elocus;
  int i;
  int collapse = 1;
  int ordered = 0;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  /* Derive the collapse depth from the tile or collapse clause; any
     loops beyond the collapse depth are counted as "ordered".  */
  if (TREE_VEC_LENGTH (declv) > 1)
    {
      tree c;

      c = omp_find_clause (clauses, OMP_CLAUSE_TILE);
      if (c)
	collapse = list_length (OMP_CLAUSE_TILE_LIST (c));
      else
	{
	  c = omp_find_clause (clauses, OMP_CLAUSE_COLLAPSE);
	  if (c)
	    collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (c));
	  if (collapse != TREE_VEC_LENGTH (declv))
	    ordered = TREE_VEC_LENGTH (declv);
	}
    }
  /* First pass: extract DECL from INIT where necessary and diagnose
     missing pieces.  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      decl = TREE_VEC_ELT (declv, i);
      init = TREE_VEC_ELT (initv, i);
      cond = TREE_VEC_ELT (condv, i);
      incr = TREE_VEC_ELT (incrv, i);
      elocus = locus;

      if (decl == NULL)
	{
	  if (init != NULL)
	    switch (TREE_CODE (init))
	      {
	      case MODIFY_EXPR:
		decl = TREE_OPERAND (init, 0);
		init = TREE_OPERAND (init, 1);
		break;
	      case MODOP_EXPR:
		if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR)
		  {
		    decl = TREE_OPERAND (init, 0);
		    init = TREE_OPERAND (init, 2);
		  }
		break;
	      default:
		break;
	      }

	  if (decl == NULL)
	    {
	      error_at (locus,
			"expected iteration declaration or initialization");
	      return NULL;
	    }
	}

      if (init && EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      if (cond == global_namespace)
	continue;

      if (cond == NULL)
	{
	  error_at (elocus, "missing controlling predicate");
	  return NULL;
	}

      if (incr == NULL)
	{
	  error_at (elocus, "missing increment expression");
	  return NULL;
	}

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
    }

  if (orig_inits)
    {
      bool fail = false;
      tree orig_init;
      FOR_EACH_VEC_ELT (*orig_inits, i, orig_init)
	if (orig_init
	    && !c_omp_check_loop_iv_exprs (locus,
					   orig_declv ? orig_declv : declv, i,
					   TREE_VEC_ELT (declv, i), orig_init,
					   NULL_TREE, cp_walk_subtrees))
	  fail = true;
      if (fail)
	return NULL;
    }

  /* In a template with dependent operands, just record the operands;
     the loop is decomposed again at instantiation time.  */
  if (dependent_omp_for_p (declv, initv, condv, incrv))
    {
      tree stmt;

      stmt = make_node (code);

      for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
	{
	  /* This is really just a place-holder.  We'll be decomposing this
	     again and going through the cp_build_modify_expr path below when
	     we instantiate the thing.  */
	  TREE_VEC_ELT (initv, i)
	    = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i),
		      TREE_VEC_ELT (initv, i));
	}

      TREE_TYPE (stmt) = void_type_node;
      OMP_FOR_INIT (stmt) = initv;
      OMP_FOR_COND (stmt) = condv;
      OMP_FOR_INCR (stmt) = incrv;
      OMP_FOR_BODY (stmt) = body;
      OMP_FOR_PRE_BODY (stmt) = pre_body;
      OMP_FOR_CLAUSES (stmt) = clauses;

      SET_EXPR_LOCATION (stmt, locus);
      return add_stmt (stmt);
    }

  if (!orig_declv)
    orig_declv = copy_node (declv);

  if (processing_template_decl)
    orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv));

  /* Second pass: validate each loop; class iterators are rewritten to
     integral loops by handle_omp_for_class_iterator (which re-runs
     the current index, hence no i++ on that path).  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); )
    {
      decl = TREE_VEC_ELT (declv, i);
      init = TREE_VEC_ELT (initv, i);
      cond = TREE_VEC_ELT (condv, i);
      incr = TREE_VEC_ELT (incrv, i);
      if (orig_incr)
	TREE_VEC_ELT (orig_incr, i) = incr;
      elocus = locus;

      if (init && EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      if (!DECL_P (decl))
	{
	  error_at (elocus, "expected iteration declaration or "
		    "initialization");
	  return NULL;
	}

      if (incr && TREE_CODE (incr) == MODOP_EXPR)
	{
	  if (orig_incr)
	    TREE_VEC_ELT (orig_incr, i) = incr;
	  incr = cp_build_modify_expr (elocus, TREE_OPERAND (incr, 0),
				       TREE_CODE (TREE_OPERAND (incr, 1)),
				       TREE_OPERAND (incr, 2),
				       tf_warning_or_error);
	}

      if (CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  if (code == OMP_SIMD)
	    {
	      error_at (elocus, "%<#pragma omp simd%> used with class "
			"iteration variable %qE", decl);
	      return NULL;
	    }
	  if (handle_omp_for_class_iterator (i, locus, code, declv,
					     orig_declv, initv, condv,
					     incrv, &body, &pre_body,
					     clauses, collapse, ordered))
	    return NULL;
	  continue;
	}

      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && !TYPE_PTR_P (TREE_TYPE (decl)))
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  return NULL;
	}

      if (!processing_template_decl && TREE_CODE (init) != TREE_VEC)
	init = cp_build_modify_expr (elocus, decl, NOP_EXPR, init,
				     tf_warning_or_error);
      else
	init = build2 (MODIFY_EXPR, void_type_node, decl, init);
      if (decl == error_mark_node || init == error_mark_node)
	return NULL;

      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      i++;
    }

  if (pre_body && IS_EMPTY_STMT (pre_body))
    pre_body = NULL;

  omp_for = c_finish_omp_for (locus, code, declv, orig_declv, initv, condv,
			      incrv, body, pre_body,
			      !processing_template_decl);

  /* Check for iterators appearing in lb, b or incr expressions.  */
  if (omp_for && !c_omp_check_loop_iv (omp_for, orig_declv, cp_walk_subtrees))
    omp_for = NULL_TREE;

  if (omp_for == NULL)
    return NULL;

  add_stmt (omp_for);

  /* Wrap side-effecting init/cond/incr operands in cleanup points.  */
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++)
    {
      init = TREE_VEC_ELT (OMP_FOR_INIT (omp_for), i);
      decl = TREE_OPERAND (init, 0);
      cond = TREE_VEC_ELT (OMP_FOR_COND (omp_for), i);
      incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i);

      if (!processing_template_decl)
	{
	  if (TREE_CODE (TREE_OPERAND (init, 1)) == TREE_VEC)
	    {
	      tree t = TREE_VEC_ELT (TREE_OPERAND (init, 1), 1);
	      TREE_VEC_ELT (TREE_OPERAND (init, 1), 1)
		= fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	      t = TREE_VEC_ELT (TREE_OPERAND (init, 1), 2);
	      TREE_VEC_ELT (TREE_OPERAND (init, 1), 2)
		= fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	    }
	  else
	    {
	      tree t = TREE_OPERAND (init, 1);
	      TREE_OPERAND (init, 1)
		= fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	    }
	  if (TREE_CODE (TREE_OPERAND (cond, 1)) == TREE_VEC)
	    {
	      tree t = TREE_VEC_ELT (TREE_OPERAND (cond, 1), 1);
	      TREE_VEC_ELT (TREE_OPERAND (cond, 1), 1)
		= fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	      t = TREE_VEC_ELT (TREE_OPERAND (cond, 1), 2);
	      TREE_VEC_ELT (TREE_OPERAND (cond, 1), 2)
		= fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	    }
	  else
	    {
	      tree t = TREE_OPERAND (cond, 1);
	      TREE_OPERAND (cond, 1)
		= fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	    }
	}

      if (TREE_CODE (incr) != MODIFY_EXPR)
	continue;

      if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1))
	  && BINARY_CLASS_P (TREE_OPERAND (incr, 1))
	  && !processing_template_decl)
	{
	  tree t = TREE_OPERAND (TREE_OPERAND (incr, 1), 0);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (TREE_OPERAND (incr, 1), 0)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);

	  t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1);
	  if (TREE_SIDE_EFFECTS (t)
	      && t != decl
	      && (TREE_CODE (t) != NOP_EXPR
		  || TREE_OPERAND (t, 0) != decl))
	    TREE_OPERAND (TREE_OPERAND (incr, 1), 1)
	      = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
	}

      if (orig_incr)
	TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i)
	  = TREE_VEC_ELT (orig_incr, i);
    }
  OMP_FOR_CLAUSES (omp_for) = clauses;

  /* For simd loops with non-static data member iterators, we could have added
     OMP_CLAUSE_LINEAR clauses without OMP_CLAUSE_LINEAR_STEP.  As we know the
     step at this point, fill it in.  */
  if (code == OMP_SIMD && !processing_template_decl
      && TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)) == 1)
    for (tree c = omp_find_clause (clauses, OMP_CLAUSE_LINEAR); c;
	 c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_LINEAR))
      if (OMP_CLAUSE_LINEAR_STEP (c) == NULL_TREE)
	{
	  decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0), 0);
	  gcc_assert (decl == OMP_CLAUSE_DECL (c));
	  incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0);
	  tree step, stept;
	  switch (TREE_CODE (incr))
	    {
	    case PREINCREMENT_EXPR:
	    case POSTINCREMENT_EXPR:
	      /* c_omp_for_incr_canonicalize_ptr() should have been
		 called to massage things appropriately.  */
	      gcc_assert (!INDIRECT_TYPE_P (TREE_TYPE (decl)));
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (TREE_TYPE (decl), 1);
	      break;
	    case PREDECREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	      /* c_omp_for_incr_canonicalize_ptr() should have been
		 called to massage things appropriately.  */
	      gcc_assert (!INDIRECT_TYPE_P (TREE_TYPE (decl)));
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (TREE_TYPE (decl), -1);
	      break;
	    case MODIFY_EXPR:
	      gcc_assert (TREE_OPERAND (incr, 0) == decl);
	      incr = TREE_OPERAND (incr, 1);
	      switch (TREE_CODE (incr))
		{
		case PLUS_EXPR:
		  if (TREE_OPERAND (incr, 1) == decl)
		    step = TREE_OPERAND (incr, 0);
		  else
		    step = TREE_OPERAND (incr, 1);
		  break;
		case MINUS_EXPR:
		case POINTER_PLUS_EXPR:
		  gcc_assert (TREE_OPERAND (incr, 0) == decl);
		  step = TREE_OPERAND (incr, 1);
		  break;
		default:
		  gcc_unreachable ();
		}
	      stept = TREE_TYPE (decl);
	      if (INDIRECT_TYPE_P (stept))
		stept = sizetype;
	      step = fold_convert (stept, step);
	      if (TREE_CODE (incr) == MINUS_EXPR)
		step = fold_build1 (NEGATE_EXPR, stept, step);
	      OMP_CLAUSE_LINEAR_STEP (c) = step;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
  /* Override saved methods on OMP_LOOP's OMP_CLAUSE_LASTPRIVATE_LOOP_IV
     clauses, we need copy ctor for those rather than default ctor,
     plus as for other lastprivates assignment op and dtor.  */
  if (code == OMP_LOOP && !processing_template_decl)
    for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	  && OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c)
	  && cxx_omp_create_clause_info (c, TREE_TYPE (OMP_CLAUSE_DECL (c)),
					 false, true, true, true))
	CP_OMP_CLAUSE_INFO (c) = NULL_TREE;

  return omp_for;
}

/* Fix up range for decls.  Those decls were pushed into BIND's
   BIND_EXPR_VARS and need to be moved into the BIND_EXPR inside of the
   OMP_FOR's body.
*/

tree
finish_omp_for_block (tree bind, tree omp_for)
{
  if (omp_for == NULL_TREE
      || !OMP_FOR_ORIG_DECLS (omp_for)
      || bind == NULL_TREE
      || TREE_CODE (bind) != BIND_EXPR)
    return bind;
  tree b = NULL_TREE;
  for (int i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (omp_for)); i++)
    if (TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (omp_for), i))
	== TREE_LIST
	&& TREE_CHAIN (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (omp_for), i)))
      {
	tree v = TREE_CHAIN (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (omp_for), i));
	gcc_assert (BIND_EXPR_BLOCK (bind)
		    && (BIND_EXPR_VARS (bind)
			== BLOCK_VARS (BIND_EXPR_BLOCK (bind))));
	/* Unchain each listed decl from BIND and rechain it into a
	   BIND_EXPR created (lazily, once) around the OMP_FOR body.  */
	for (int j = 2; j < TREE_VEC_LENGTH (v); j++)
	  for (tree *p = &BIND_EXPR_VARS (bind); *p; p = &DECL_CHAIN (*p))
	    {
	      if (*p == TREE_VEC_ELT (v, j))
		{
		  tree var = *p;
		  *p = DECL_CHAIN (*p);
		  if (b == NULL_TREE)
		    {
		      b = make_node (BLOCK);
		      b = build3 (BIND_EXPR, void_type_node, NULL_TREE,
				  OMP_FOR_BODY (omp_for), b);
		      TREE_SIDE_EFFECTS (b) = 1;
		      OMP_FOR_BODY (omp_for) = b;
		    }
		  DECL_CHAIN (var) = BIND_EXPR_VARS (b);
		  BIND_EXPR_VARS (b) = var;
		  BLOCK_VARS (BIND_EXPR_BLOCK (b)) = var;
		}
	    }
	BLOCK_VARS (BIND_EXPR_BLOCK (bind)) = BIND_EXPR_VARS (bind);
      }
  return bind;
}

/* Finish a #pragma omp atomic construct.  CODE selects the flavor
   (OMP_ATOMIC, OMP_ATOMIC_READ, ...), OPCODE the update operation,
   and MO the memory order.  Diagnoses mismatched memory operands and
   emits the resulting statement.  */

void
finish_omp_atomic (location_t loc, enum tree_code code,
		   enum tree_code opcode, tree lhs, tree rhs, tree v,
		   tree lhs1, tree rhs1, tree clauses,
		   enum omp_memory_order mo)
{
  tree orig_lhs;
  tree orig_rhs;
  tree orig_v;
  tree orig_lhs1;
  tree orig_rhs1;
  bool dependent_p;
  tree stmt;

  orig_lhs = lhs;
  orig_rhs = rhs;
  orig_v = v;
  orig_lhs1 = lhs1;
  orig_rhs1 = rhs1;
  dependent_p = false;
  stmt = NULL_TREE;

  /* Even in a template, we can detect invalid uses of the atomic
     pragma if neither LHS nor RHS is type-dependent.  */
  if (processing_template_decl)
    {
      dependent_p = (type_dependent_expression_p (lhs)
		     || (rhs && type_dependent_expression_p (rhs))
		     || (v && type_dependent_expression_p (v))
		     || (lhs1 && type_dependent_expression_p (lhs1))
		     || (rhs1 && type_dependent_expression_p (rhs1)));
      if (clauses)
	{
	  gcc_assert (TREE_CODE (clauses) == OMP_CLAUSE
		      && OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_HINT
		      && OMP_CLAUSE_CHAIN (clauses) == NULL_TREE);
	  if (type_dependent_expression_p (OMP_CLAUSE_HINT_EXPR (clauses))
	      || TREE_CODE (OMP_CLAUSE_HINT_EXPR (clauses)) != INTEGER_CST)
	    dependent_p = true;
	}
      if (!dependent_p)
	{
	  lhs = build_non_dependent_expr (lhs);
	  if (rhs)
	    rhs = build_non_dependent_expr (rhs);
	  if (v)
	    v = build_non_dependent_expr (v);
	  if (lhs1)
	    lhs1 = build_non_dependent_expr (lhs1);
	  if (rhs1)
	    rhs1 = build_non_dependent_expr (rhs1);
	}
    }
  if (!dependent_p)
    {
      bool swapped = false;
      if (rhs1 && cp_tree_equal (lhs, rhs))
	{
	  std::swap (rhs, rhs1);
	  swapped = !commutative_tree_code (opcode);
	}
      /* All memory operands must name the same object.  */
      if (rhs1 && !cp_tree_equal (lhs, rhs1))
	{
	  if (code == OMP_ATOMIC)
	    error ("%<#pragma omp atomic update%> uses two different "
		   "expressions for memory");
	  else
	    error ("%<#pragma omp atomic capture%> uses two different "
		   "expressions for memory");
	  return;
	}
      if (lhs1 && !cp_tree_equal (lhs, lhs1))
	{
	  if (code == OMP_ATOMIC)
	    error ("%<#pragma omp atomic update%> uses two different "
		   "expressions for memory");
	  else
	    error ("%<#pragma omp atomic capture%> uses two different "
		   "expressions for memory");
	  return;
	}
      stmt = c_finish_omp_atomic (loc, code, opcode, lhs, rhs, v, lhs1,
				  rhs1, swapped, mo,
				  processing_template_decl != 0);
      if (stmt == error_mark_node)
	return;
    }
  if (processing_template_decl)
    {
      /* Rebuild a template-friendly form from the original operands.  */
      if (code == OMP_ATOMIC_READ)
	{
	  stmt = build_min_nt_loc (loc, OMP_ATOMIC_READ, orig_lhs);
	  OMP_ATOMIC_MEMORY_ORDER (stmt) = mo;
	  stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
	}
      else
	{
	  if (opcode == NOP_EXPR)
	    stmt = build2 (MODIFY_EXPR, void_type_node, orig_lhs, orig_rhs);
	  else
	    stmt = build2 (opcode, void_type_node, orig_lhs, orig_rhs);
	  if (orig_rhs1)
	    stmt = build_min_nt_loc (EXPR_LOCATION (orig_rhs1),
				     COMPOUND_EXPR, orig_rhs1, stmt);
	  if (code != OMP_ATOMIC)
	    {
	      stmt = build_min_nt_loc (loc, code, orig_lhs1, stmt);
	      OMP_ATOMIC_MEMORY_ORDER (stmt) = mo;
	      stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt);
	    }
	}
      stmt = build2 (OMP_ATOMIC, void_type_node,
		     clauses ? clauses : integer_zero_node, stmt);
      OMP_ATOMIC_MEMORY_ORDER (stmt) = mo;
      SET_EXPR_LOCATION (stmt, loc);
    }

  /* Avoid -Wunused-value warnings here, the whole construct has
     side-effects and even if it might be wrapped from fold-const.c
     or c-omp.c wrapped in some tree that appears to be unused, the
     value is not unused.  */
  warning_sentinel w (warn_unused_value);
  finish_expr_stmt (stmt);
}

/* Emit a call to the GOMP_barrier builtin.  */

void
finish_omp_barrier (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
  releasing_vec vec;
  tree stmt = finish_call_expr (fn, &vec, false, false,
				tf_warning_or_error);
  finish_expr_stmt (stmt);
}

/* Finish a #pragma omp depobj construct: validate DEPOBJ is an lvalue
   and hand off to c_finish_omp_depobj.  */

void
finish_omp_depobj (location_t loc, tree depobj,
		   enum omp_clause_depend_kind kind, tree clause)
{
  if (!error_operand_p (depobj) && !type_dependent_expression_p (depobj))
    {
      if (!lvalue_p (depobj))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "%<depobj%> expression is not lvalue expression");
	  depobj = error_mark_node;
	}
    }

  if (processing_template_decl)
    {
      if (clause == NULL_TREE)
	clause = build_int_cst (integer_type_node, kind);
      add_stmt (build_min_nt_loc (loc, OMP_DEPOBJ, depobj, clause));
      return;
    }

  if (!error_operand_p (depobj))
    {
      tree addr = cp_build_addr_expr (depobj, tf_warning_or_error);
      if (addr == error_mark_node)
	depobj = error_mark_node;
      else
	depobj = cp_build_indirect_ref (loc, addr, RO_UNARY_STAR,
					tf_warning_or_error);
    }

  c_finish_omp_depobj (loc, depobj, kind, clause);
}

/* Emit a flush: __sync_synchronize, or __atomic_thread_fence with MO
   when a memory order other than MEMMODEL_LAST is requested.  */

void
finish_omp_flush (int mo)
{
  tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
  releasing_vec vec;
  if (mo != MEMMODEL_LAST)
    {
      fn = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
      vec->quick_push (build_int_cst (integer_type_node, mo));
    }
  tree stmt = finish_call_expr (fn, &vec, false, false,
				tf_warning_or_error);
  finish_expr_stmt (stmt);
}

/* Emit a call to the GOMP_taskwait builtin.  */

void
finish_omp_taskwait (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
  releasing_vec vec;
  tree stmt = finish_call_expr (fn, &vec, false, false,
				tf_warning_or_error);
  finish_expr_stmt (stmt);
}

/* Emit a call to the GOMP_taskyield builtin.  */

void
finish_omp_taskyield (void)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
  releasing_vec vec;
  tree stmt = finish_call_expr (fn, &vec, false, false,
				tf_warning_or_error);
  finish_expr_stmt (stmt);
}

/* Finish a #pragma omp cancel: encode the construct kind as a mask
   and the optional if-clause condition, then call GOMP_cancel.  */

void
finish_omp_cancel (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
  int mask = 0;
  if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancel%> must specify one of "
	     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }
  releasing_vec vec;
  tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (ifc != NULL_TREE)
    {
      /* At most one if clause, and its modifier must be 'cancel' (or
	 absent, represented by VOID_CST).  */
      if (OMP_CLAUSE_IF_MODIFIER (ifc) != ERROR_MARK
	  && OMP_CLAUSE_IF_MODIFIER (ifc) != VOID_CST)
	error_at (OMP_CLAUSE_LOCATION (ifc),
		  "expected %<cancel%> %<if%> clause modifier");
      else
	{
	  tree ifc2 = omp_find_clause (OMP_CLAUSE_CHAIN (ifc),
				       OMP_CLAUSE_IF);
	  if (ifc2 != NULL_TREE)
	    {
	      gcc_assert (OMP_CLAUSE_IF_MODIFIER (ifc) == VOID_CST
			  && OMP_CLAUSE_IF_MODIFIER (ifc2) != ERROR_MARK
			  && OMP_CLAUSE_IF_MODIFIER (ifc2) != VOID_CST);
	      error_at (OMP_CLAUSE_LOCATION (ifc2),
			"expected %<cancel%> %<if%> clause modifier");
	    }
	}

      if (!processing_template_decl)
	ifc = maybe_convert_cond (OMP_CLAUSE_IF_EXPR (ifc));
      else
	ifc = build_x_binary_op (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
				 OMP_CLAUSE_IF_EXPR (ifc), ERROR_MARK,
				 integer_zero_node, ERROR_MARK,
				 NULL, tf_warning_or_error);
    }
  else
    ifc = boolean_true_node;
  vec->quick_push (build_int_cst (integer_type_node, mask));
  vec->quick_push (ifc);
  tree stmt = finish_call_expr (fn, &vec, false, false,
				tf_warning_or_error);
  finish_expr_stmt (stmt);
}

/* Finish a #pragma omp cancellation point: encode the construct kind
   as a mask and call GOMP_cancellation_point.  */

void
finish_omp_cancellation_point (tree clauses)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT);
  int mask = 0;
  if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL))
    mask = 1;
  else if (omp_find_clause (clauses, OMP_CLAUSE_FOR))
    mask = 2;
  else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS))
    mask = 4;
  else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP))
    mask = 8;
  else
    {
      error ("%<#pragma omp cancellation point%> must specify one of "
	     "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses");
      return;
    }
  releasing_vec vec
    = make_tree_vector_single (build_int_cst (integer_type_node, mask));
  tree stmt = finish_call_expr (fn, &vec, false, false,
				tf_warning_or_error);
  finish_expr_stmt (stmt);
}

/* Begin a __transaction_atomic or __transaction_relaxed statement.
   If PCOMPOUND is non-null, this is for a function-transaction-block,
   and we should create an extra compound stmt.  */

tree
begin_transaction_stmt (location_t loc, tree *pcompound, int flags)
{
  tree r;

  if (pcompound)
    *pcompound = begin_compound_stmt (0);

  r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE);

  /* Only add the statement to the function if support enabled.  */
  if (flag_tm)
    add_stmt (r);
  else
    error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0
		    ? G_("%<__transaction_relaxed%> without "
			 "transactional memory support enabled")
		    : G_("%<__transaction_atomic%> without "
			 "transactional memory support enabled")));

  TRANSACTION_EXPR_BODY (r) = push_stmt_list ();
  TREE_SIDE_EFFECTS (r) = 1;
  return r;
}

/* End a __transaction_atomic or __transaction_relaxed statement.
   If COMPOUND_STMT is non-null, this is for a function-transaction-block,
   and we should end the compound.  If NOEX is non-NULL, we wrap the body in
   a MUST_NOT_THROW_EXPR with NOEX as condition.
*/

void
finish_transaction_stmt (tree stmt, tree compound_stmt, int flags, tree noex)
{
  TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt));
  TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0;
  TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0;
  TRANSACTION_EXPR_IS_STMT (stmt) = 1;

  /* noexcept specifications are not allowed for function transactions.  */
  gcc_assert (!(noex && compound_stmt));
  if (noex)
    {
      tree body = build_must_not_throw_expr (TRANSACTION_EXPR_BODY (stmt),
					     noex);
      protected_set_expr_location
	(body, EXPR_LOCATION (TRANSACTION_EXPR_BODY (stmt)));
      TREE_SIDE_EFFECTS (body) = 1;
      TRANSACTION_EXPR_BODY (stmt) = body;
    }

  if (compound_stmt)
    finish_compound_stmt (compound_stmt);
}

/* Build a __transaction_atomic or __transaction_relaxed expression.  If
   NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as
   condition.  */

tree
build_transaction_expr (location_t loc, tree expr, int flags, tree noex)
{
  tree ret;
  if (noex)
    {
      expr = build_must_not_throw_expr (expr, noex);
      protected_set_expr_location (expr, loc);
      TREE_SIDE_EFFECTS (expr) = 1;
    }
  ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr);
  if (flags & TM_STMT_ATTR_RELAXED)
    TRANSACTION_EXPR_RELAXED (ret) = 1;
  TREE_SIDE_EFFECTS (ret) = 1;
  SET_EXPR_LOCATION (ret, loc);
  return ret;
}

/* One-time initialization hook for this file; currently nothing to
   initialize.  */

void
init_cp_semantics (void)
{
}

/* Build a STATIC_ASSERT for a static assertion with the condition
   CONDITION and the message text MESSAGE.  LOCATION is the location
   of the static assertion in the source code.  When MEMBER_P, this
   static assertion is a member of a class.
*/

void
finish_static_assert (tree condition, tree message, location_t location,
		      bool member_p)
{
  tsubst_flags_t complain = tf_warning_or_error;

  if (message == NULL_TREE
      || message == error_mark_node
      || condition == NULL_TREE
      || condition == error_mark_node)
    return;

  if (check_for_bare_parameter_packs (condition))
    condition = error_mark_node;

  if (instantiation_dependent_expression_p (condition))
    {
      /* We're in a template; build a STATIC_ASSERT and put it in
	 the right place.  */
      tree assertion;

      assertion = make_node (STATIC_ASSERT);
      STATIC_ASSERT_CONDITION (assertion) = condition;
      STATIC_ASSERT_MESSAGE (assertion) = message;
      STATIC_ASSERT_SOURCE_LOCATION (assertion) = location;

      if (member_p)
	maybe_add_class_template_decl_list (current_class_type,
					    assertion,
					    /*friend_p=*/0);
      else
	add_stmt (assertion);

      return;
    }

  /* Save the condition in case it was a concept check.  */
  tree orig_condition = condition;

  /* Fold the expression and convert it to a boolean value.  */
  condition = perform_implicit_conversion_flags (boolean_type_node,
						 condition, complain,
						 LOOKUP_NORMAL);
  condition = fold_non_dependent_expr (condition, complain,
				       /*manifestly_const_eval=*/true);

  if (TREE_CODE (condition) == INTEGER_CST && !integer_zerop (condition))
    /* Do nothing; the condition is satisfied.  */
    ;
  else
    {
      /* Temporarily point diagnostics at the assertion itself.  */
      location_t saved_loc = input_location;

      input_location = location;
      if (TREE_CODE (condition) == INTEGER_CST
	  && integer_zerop (condition))
	{
	  /* LEN is the message length without the trailing NUL.  */
	  int sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT
				     (TREE_TYPE (TREE_TYPE (message))));
	  int len = TREE_STRING_LENGTH (message) / sz - 1;

	  /* Report the error. */
	  if (len == 0)
	    error ("static assertion failed");
	  else
	    error ("static assertion failed: %s",
		   TREE_STRING_POINTER (message));

	  /* Actually explain the failure if this is a concept check or a
	     requires-expression.  */
	  if (concept_check_p (orig_condition)
	      || TREE_CODE (orig_condition) == REQUIRES_EXPR)
	    diagnose_constraints (location, orig_condition, NULL_TREE);
	}
      else if (condition && condition != error_mark_node)
	{
	  error ("non-constant condition for static assertion");
	  if (require_rvalue_constant_expression (condition))
	    cxx_constant_value (condition);
	}
      input_location = saved_loc;
    }
}

/* Implements the C++0x decltype keyword.  Returns the type of EXPR,
   suitable for use as a type-specifier.  ID_EXPRESSION_OR_MEMBER_ACCESS_P
   is true when EXPR was parsed as an id-expression or a class member
   access, FALSE when it was parsed as a full expression.  */

tree
finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
		      tsubst_flags_t complain)
{
  tree type = NULL_TREE;

  if (!expr || error_operand_p (expr))
    return error_mark_node;

  if (TYPE_P (expr)
      || TREE_CODE (expr) == TYPE_DECL
      || (TREE_CODE (expr) == BIT_NOT_EXPR
	  && TYPE_P (TREE_OPERAND (expr, 0))))
    {
      if (complain & tf_error)
	error ("argument to %<decltype%> must be an expression");
      return error_mark_node;
    }

  /* Depending on the resolution of DR 1172, we may later need to distinguish
     instantiation-dependent but not type-dependent expressions so that, say,
     A<decltype(sizeof(T))>::U doesn't require 'typename'.  */
  if (instantiation_dependent_uneval_expression_p (expr))
    {
      type = cxx_make_type (DECLTYPE_TYPE);
      DECLTYPE_TYPE_EXPR (type) = expr;
      DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type)
	= id_expression_or_member_access_p;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }
  else if (processing_template_decl)
    {
      ++cp_unevaluated_operand;
      expr = instantiate_non_dependent_expr_sfinae (expr, complain);
      --cp_unevaluated_operand;
      if (expr == error_mark_node)
	return error_mark_node;
    }

  /* The type denoted by decltype(e) is defined as follows:  */

  expr = resolve_nondeduced_context (expr, complain);

  if (invalid_nonstatic_memfn_p (input_location, expr, complain))
    return error_mark_node;

  if (type_unknown_p (expr))
    {
      if (complain & tf_error)
	error ("%<decltype%> cannot resolve address of overloaded "
	       "function");
      return error_mark_node;
    }

  /* To get the size of a static data member declared as an array of
     unknown bound, we need to instantiate it.  */
  if (VAR_P (expr)
      && VAR_HAD_UNKNOWN_BOUND (expr)
      && DECL_TEMPLATE_INSTANTIATION (expr))
    instantiate_decl (expr, /*defer_ok*/true, /*expl_inst_mem*/false);

  if (id_expression_or_member_access_p)
    {
      /* If e is an id-expression or a class member access (5.2.5
	 [expr.ref]), decltype(e) is defined as the type of the entity
	 named by e.  If there is no such entity, or e names a set of
	 overloaded functions, the program is ill-formed.  */
      if (identifier_p (expr))
	expr = lookup_name (expr);

      if (INDIRECT_REF_P (expr)
	  || TREE_CODE (expr) == VIEW_CONVERT_EXPR)
	/* This can happen when the expression is, e.g., "a.b".  Just
	   look at the underlying operand.  */
	expr = TREE_OPERAND (expr, 0);

      if (TREE_CODE (expr) == OFFSET_REF
	  || TREE_CODE (expr) == MEMBER_REF
	  || TREE_CODE (expr) == SCOPE_REF)
	/* We're only interested in the field itself.  If it is a
	   BASELINK, we will need to see through it in the next
	   step.  */
	expr = TREE_OPERAND (expr, 1);

      if (BASELINK_P (expr))
	/* See through BASELINK nodes to the underlying function.  */
	expr = BASELINK_FUNCTIONS (expr);

      /* decltype of a decomposition name drops references in the tuple case
	 (unlike decltype of a normal variable) and keeps cv-qualifiers from
	 the containing object in the other cases (unlike decltype of a member
	 access expression).  */
      if (DECL_DECOMPOSITION_P (expr))
	{
	  if (DECL_HAS_VALUE_EXPR_P (expr))
	    /* Expr is an array or struct subobject proxy, handle
	       bit-fields properly.  */
	    return unlowered_expr_type (expr);
	  else
	    /* Expr is a reference variable for the tuple case.  */
	    return lookup_decomp_type (expr);
	}

      switch (TREE_CODE (expr))
	{
	case FIELD_DECL:
	  if (DECL_BIT_FIELD_TYPE (expr))
	    {
	      type = DECL_BIT_FIELD_TYPE (expr);
	      break;
	    }
	  /* Fall through for fields that aren't bitfields.  */
	  gcc_fallthrough ();

	case FUNCTION_DECL:
	case VAR_DECL:
	case CONST_DECL:
	case PARM_DECL:
	case RESULT_DECL:
	case TEMPLATE_PARM_INDEX:
	  expr = mark_type_use (expr);
	  type = TREE_TYPE (expr);
	  break;

	case ERROR_MARK:
	  type = error_mark_node;
	  break;

	case COMPONENT_REF:
	case COMPOUND_EXPR:
	  mark_type_use (expr);
	  type = is_bitfield_expr_with_lowered_type (expr);
	  if (!type)
	    type = TREE_TYPE (TREE_OPERAND (expr, 1));
	  break;

	case BIT_FIELD_REF:
	  gcc_unreachable ();

	case INTEGER_CST:
	case PTRMEM_CST:
	  /* We can get here when the id-expression refers to an
	     enumerator or non-type template parameter.  */
	  type = TREE_TYPE (expr);
	  break;

	default:
	  /* Handle instantiated template non-type arguments.  */
	  type = TREE_TYPE (expr);
	  break;
	}
    }
  else
    {
      /* Within a lambda-expression:

	 Every occurrence of decltype((x)) where x is a possibly
	 parenthesized id-expression that names an entity of
	 automatic storage duration is treated as if x were
	 transformed into an access to a corresponding data member
	 of the closure type that would have been declared if x
	 were a use of the denoted entity.  */
      if (outer_automatic_var_p (expr)
	  && current_function_decl
	  && LAMBDA_FUNCTION_P (current_function_decl))
	type = capture_decltype (expr);
      else if (error_operand_p (expr))
	type = error_mark_node;
      else if (expr == current_class_ptr)
	/* If the expression is just "this", we want the
	   cv-unqualified pointer for the "this" type.  */
	type = TYPE_MAIN_VARIANT (TREE_TYPE (expr));
      else
	{
	  /* Otherwise, where T is the type of e, if e is an lvalue,
	     decltype(e) is defined as T&; if an xvalue, T&&; otherwise,
	     T.  */
	  cp_lvalue_kind clk = lvalue_kind (expr);
	  type = unlowered_expr_type (expr);
	  gcc_assert (!TYPE_REF_P (type));

	  /* For vector types, pick a non-opaque variant.  */
	  if (VECTOR_TYPE_P (type))
	    type = strip_typedefs (type);

	  if (clk != clk_none && !(clk & clk_class))
	    type = cp_build_reference_type (type, (clk & clk_rvalueref));
	}
    }

  return type;
}

/* Called from trait_expr_value to evaluate either __has_nothrow_assign
   or __has_nothrow_copy, depending on assign_p.  Returns true iff all
   the copy {ctor,assign} fns are nothrow.  */

static bool
classtype_has_nothrow_assign_or_copy_p (tree type, bool assign_p)
{
  tree fns = NULL_TREE;

  if (assign_p || TYPE_HAS_COPY_CTOR (type))
    fns = get_class_binding (type, assign_p ? assign_op_identifier
			     : ctor_identifier);

  bool saw_copy = false;
  for (ovl_iterator iter (fns); iter; ++iter)
    {
      tree fn = *iter;

      if (copy_fn_p (fn) > 0)
	{
	  saw_copy = true;
	  if (!maybe_instantiate_noexcept (fn)
	      || !TYPE_NOTHROW_P (TREE_TYPE (fn)))
	    return false;
	}
    }

  return saw_copy;
}

/* Actually evaluates the trait.
*/ static bool trait_expr_value (cp_trait_kind kind, tree type1, tree type2) { enum tree_code type_code1; tree t; type_code1 = TREE_CODE (type1); switch (kind) { case CPTK_HAS_NOTHROW_ASSIGN: type1 = strip_array_types (type1); return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE && (trait_expr_value (CPTK_HAS_TRIVIAL_ASSIGN, type1, type2) || (CLASS_TYPE_P (type1) && classtype_has_nothrow_assign_or_copy_p (type1, true)))); case CPTK_HAS_TRIVIAL_ASSIGN: /* ??? The standard seems to be missing the "or array of such a class type" wording for this trait. */ type1 = strip_array_types (type1); return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE && (trivial_type_p (type1) || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_COPY_ASSIGN (type1)))); case CPTK_HAS_NOTHROW_CONSTRUCTOR: type1 = strip_array_types (type1); return (trait_expr_value (CPTK_HAS_TRIVIAL_CONSTRUCTOR, type1, type2) || (CLASS_TYPE_P (type1) && (t = locate_ctor (type1)) && maybe_instantiate_noexcept (t) && TYPE_NOTHROW_P (TREE_TYPE (t)))); case CPTK_HAS_TRIVIAL_CONSTRUCTOR: type1 = strip_array_types (type1); return (trivial_type_p (type1) || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DFLT (type1))); case CPTK_HAS_NOTHROW_COPY: type1 = strip_array_types (type1); return (trait_expr_value (CPTK_HAS_TRIVIAL_COPY, type1, type2) || (CLASS_TYPE_P (type1) && classtype_has_nothrow_assign_or_copy_p (type1, false))); case CPTK_HAS_TRIVIAL_COPY: /* ??? The standard seems to be missing the "or array of such a class type" wording for this trait. 
*/ type1 = strip_array_types (type1); return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_COPY_CTOR (type1))); case CPTK_HAS_TRIVIAL_DESTRUCTOR: type1 = strip_array_types (type1); return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DESTRUCTOR (type1))); case CPTK_HAS_VIRTUAL_DESTRUCTOR: return type_has_virtual_destructor (type1); case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS: return type_has_unique_obj_representations (type1); case CPTK_IS_ABSTRACT: return ABSTRACT_CLASS_TYPE_P (type1); case CPTK_IS_AGGREGATE: return CP_AGGREGATE_TYPE_P (type1); case CPTK_IS_BASE_OF: return (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2) && (same_type_ignoring_top_level_qualifiers_p (type1, type2) || DERIVED_FROM_P (type1, type2))); case CPTK_IS_CLASS: return NON_UNION_CLASS_TYPE_P (type1); case CPTK_IS_EMPTY: return NON_UNION_CLASS_TYPE_P (type1) && CLASSTYPE_EMPTY_P (type1); case CPTK_IS_ENUM: return type_code1 == ENUMERAL_TYPE; case CPTK_IS_FINAL: return CLASS_TYPE_P (type1) && CLASSTYPE_FINAL (type1); case CPTK_IS_LITERAL_TYPE: return literal_type_p (type1); case CPTK_IS_POD: return pod_type_p (type1); case CPTK_IS_POLYMORPHIC: return CLASS_TYPE_P (type1) && TYPE_POLYMORPHIC_P (type1); case CPTK_IS_SAME_AS: return same_type_p (type1, type2); case CPTK_IS_STD_LAYOUT: return std_layout_type_p (type1); case CPTK_IS_TRIVIAL: return trivial_type_p (type1); case CPTK_IS_TRIVIALLY_ASSIGNABLE: return is_trivially_xible (MODIFY_EXPR, type1, type2); case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE: return is_trivially_xible (INIT_EXPR, type1, type2); case CPTK_IS_TRIVIALLY_COPYABLE: return trivially_copyable_p (type1); case CPTK_IS_UNION: return type_code1 == UNION_TYPE; case CPTK_IS_ASSIGNABLE: return is_xible (MODIFY_EXPR, type1, type2); case CPTK_IS_CONSTRUCTIBLE: return is_xible (INIT_EXPR, type1, type2); default: gcc_unreachable (); return false; } } /* If TYPE is an 
array of unknown bound, or (possibly cv-qualified) void, or a complete
   type, returns true, otherwise false.  */

static bool
check_trait_type (tree type)
{
  /* Unary traits leave TYPE2 as NULL_TREE; that is vacuously OK.  */
  if (type == NULL_TREE)
    return true;

  /* A TREE_LIST holds a pack of argument types; check each one.  */
  if (TREE_CODE (type) == TREE_LIST)
    return (check_trait_type (TREE_VALUE (type))
	    && check_trait_type (TREE_CHAIN (type)));

  /* Array of unknown bound is acceptable if the element type is complete.  */
  if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type)
      && COMPLETE_TYPE_P (TREE_TYPE (type)))
    return true;

  if (VOID_TYPE_P (type))
    return true;

  /* Otherwise require a complete type; this diagnoses on failure.  */
  return !!complete_type_or_else (strip_array_types (type), NULL_TREE);
}

/* Process a trait expression.  Returns a TRAIT_EXPR in a template,
   otherwise a boolean constant (possibly wrapped with LOC).  */

tree
finish_trait_expr (location_t loc, cp_trait_kind kind, tree type1, tree type2)
{
  if (type1 == error_mark_node || type2 == error_mark_node)
    return error_mark_node;

  if (processing_template_decl)
    {
      /* Defer evaluation until instantiation.  */
      tree trait_expr = make_node (TRAIT_EXPR);
      TREE_TYPE (trait_expr) = boolean_type_node;
      TRAIT_EXPR_TYPE1 (trait_expr) = type1;
      TRAIT_EXPR_TYPE2 (trait_expr) = type2;
      TRAIT_EXPR_KIND (trait_expr) = kind;
      TRAIT_EXPR_LOCATION (trait_expr) = loc;
      return trait_expr;
    }

  /* Validate completeness requirements per trait before evaluating.  */
  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
    case CPTK_HAS_TRIVIAL_ASSIGN:
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
    case CPTK_HAS_NOTHROW_COPY:
    case CPTK_HAS_TRIVIAL_COPY:
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
    case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS:
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
    case CPTK_IS_ABSTRACT:
    case CPTK_IS_AGGREGATE:
    case CPTK_IS_EMPTY:
    case CPTK_IS_FINAL:
    case CPTK_IS_LITERAL_TYPE:
    case CPTK_IS_POD:
    case CPTK_IS_POLYMORPHIC:
    case CPTK_IS_STD_LAYOUT:
    case CPTK_IS_TRIVIAL:
    case CPTK_IS_TRIVIALLY_COPYABLE:
      if (!check_trait_type (type1))
	return error_mark_node;
      break;

    case CPTK_IS_ASSIGNABLE:
    case CPTK_IS_CONSTRUCTIBLE:
      break;

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      if (!check_trait_type (type1) || !check_trait_type (type2))
	return error_mark_node;
      break;

    case CPTK_IS_BASE_OF:
      if (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
	  && !same_type_ignoring_top_level_qualifiers_p (type1, type2)
	  && !complete_type_or_else (type2, NULL_TREE))
	/* We already issued an error.  */
	return error_mark_node;
      break;

    case CPTK_IS_CLASS:
    case CPTK_IS_ENUM:
    case CPTK_IS_UNION:
    case CPTK_IS_SAME_AS:
      break;

    default:
      gcc_unreachable ();
    }

  tree val = (trait_expr_value (kind, type1, type2)
	      ? boolean_true_node : boolean_false_node);
  return maybe_wrap_with_location (val, loc);
}

/* Do-nothing variants of functions to handle pragma FLOAT_CONST_DECIMAL64,
   which is ignored for C++.  */

void
set_float_const_decimal64 (void)
{
}

void
clear_float_const_decimal64 (void)
{
}

bool
float_const_decimal64_p (void)
{
  /* Always false for C++.  */
  return 0;
}

/* Return true if T designates the implied `this' parameter.  */

bool
is_this_parameter (tree t)
{
  if (!DECL_P (t) || DECL_NAME (t) != this_identifier)
    return false;
  gcc_assert (TREE_CODE (t) == PARM_DECL || is_capture_proxy (t)
	      || (cp_binding_oracle && TREE_CODE (t) == VAR_DECL));
  return true;
}

/* Insert the deduced return type for an auto function.  FCO is the
   function declaration; RETURN_TYPE is the deduced type.  Rebuilds
   DECL_RESULT to reflect the new type when necessary.  */

void
apply_deduced_return_type (tree fco, tree return_type)
{
  tree result;

  if (return_type == error_mark_node)
    return;

  /* Conversion operators are named after their return type.  */
  if (DECL_CONV_FN_P (fco))
    DECL_NAME (fco) = make_conv_op_name (return_type);

  TREE_TYPE (fco) = change_return_type (return_type, TREE_TYPE (fco));

  result = DECL_RESULT (fco);
  if (result == NULL_TREE)
    return;
  if (TREE_TYPE (result) == return_type)
    return;

  if (!processing_template_decl && !VOID_TYPE_P (return_type)
      && !complete_type_or_else (return_type, NULL_TREE))
    return;

  /* We already have a DECL_RESULT from start_preparsed_function.
     Now we need to redo the work it and allocate_struct_function did to
     reflect the new type.  */
  gcc_assert (current_function_decl == fco);
  result = build_decl (input_location, RESULT_DECL, NULL_TREE,
		       TYPE_MAIN_VARIANT (return_type));
  DECL_ARTIFICIAL (result) = 1;
  DECL_IGNORED_P (result) = 1;
  cp_apply_type_quals_to_decl (cp_type_quals (return_type), result);
  DECL_RESULT (fco) = result;

  if (!processing_template_decl)
    {
      /* Record whether the value is returned in memory.  */
      bool aggr = aggregate_value_p (result, fco);
#ifdef PCC_STATIC_STRUCT_RETURN
      cfun->returns_pcc_struct = aggr;
#endif
      cfun->returns_struct = aggr;
    }
}

/* DECL is a local variable or parameter from the surrounding scope of a
   lambda-expression.  Returns the decltype for a use of the capture field
   for DECL even if it hasn't been captured yet.  */

static tree
capture_decltype (tree decl)
{
  tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl));
  /* Look for an existing capture proxy in the lambda body's scope.  */
  tree cap = lookup_name (DECL_NAME (decl), LOOK_where::BLOCK,
			  LOOK_want::HIDDEN_LAMBDA);
  tree type;

  if (cap && is_capture_proxy (cap))
    type = TREE_TYPE (cap);
  else
    /* Not captured yet: compute the type the capture would have.  */
    switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam))
      {
      case CPLD_NONE:
	error ("%qD is not captured", decl);
	return error_mark_node;

      case CPLD_COPY:
	type = TREE_TYPE (decl);
	if (TYPE_REF_P (type)
	    && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
	  type = TREE_TYPE (type);
	break;

      case CPLD_REFERENCE:
	type = TREE_TYPE (decl);
	if (!TYPE_REF_P (type))
	  type = build_reference_type (TREE_TYPE (decl));
	break;

      default:
	gcc_unreachable ();
      }

  if (!TYPE_REF_P (type))
    {
      /* Members of a non-mutable lambda are const through `this'.  */
      if (!LAMBDA_EXPR_MUTABLE_P (lam))
	type = cp_build_qualified_type (type, (cp_type_quals (type)
					       |TYPE_QUAL_CONST));
      type = build_reference_type (type);
    }
  return type;
}

/* Build a unary fold expression of EXPR over OP. If IS_RIGHT is true,
   this is a right unary fold. Otherwise it is a left unary fold.  */

static tree
finish_unary_fold_expr (tree expr, int op, tree_code dir)
{
  /* Build a pack expansion (assuming expr has pack type).  */
  if (!uses_parameter_packs (expr))
    {
      error_at (location_of (expr), "operand of fold expression has no "
		"unexpanded parameter packs");
      return error_mark_node;
    }
  tree pack = make_pack_expansion (expr);

  /* Build the fold expression.  A negative OP encodes a compound
     (op=) fold; the code stored is always the absolute value.  */
  tree code = build_int_cstu (integer_type_node, abs (op));
  tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack);
  FOLD_EXPR_MODIFY_P (fold) = (op < 0);
  return fold;
}

tree
finish_left_unary_fold_expr (tree expr, int op)
{
  return finish_unary_fold_expr (expr, op, UNARY_LEFT_FOLD_EXPR);
}

tree
finish_right_unary_fold_expr (tree expr, int op)
{
  return finish_unary_fold_expr (expr, op, UNARY_RIGHT_FOLD_EXPR);
}

/* Build a binary fold expression over EXPR1 and EXPR2. The
   associativity of the fold is determined by EXPR1 and EXPR2 (whichever
   has an unexpanded parameter pack).  */

tree
finish_binary_fold_expr (tree pack, tree init, int op, tree_code dir)
{
  pack = make_pack_expansion (pack);
  tree code = build_int_cstu (integer_type_node, abs (op));
  tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack, init);
  FOLD_EXPR_MODIFY_P (fold) = (op < 0);
  return fold;
}

tree
finish_binary_fold_expr (tree expr1, tree expr2, int op)
{
  // Determine which expr has an unexpanded parameter pack and
  // set the pack and initial term.
  bool pack1 = uses_parameter_packs (expr1);
  bool pack2 = uses_parameter_packs (expr2);
  if (pack1 && !pack2)
    return finish_binary_fold_expr (expr1, expr2, op, BINARY_RIGHT_FOLD_EXPR);
  else if (pack2 && !pack1)
    return finish_binary_fold_expr (expr2, expr1, op, BINARY_LEFT_FOLD_EXPR);
  else
    {
      /* Exactly one operand must contain an unexpanded pack.  */
      if (pack1)
	error ("both arguments in binary fold have unexpanded parameter packs");
      else
	error ("no unexpanded parameter packs in binary fold");
    }
  return error_mark_node;
}

/* Finish __builtin_launder (arg).  */

tree
finish_builtin_launder (location_t loc, tree arg, tsubst_flags_t complain)
{
  tree orig_arg = arg;
  if (!type_dependent_expression_p (arg))
    arg = decay_conversion (arg, complain);
  if (error_operand_p (arg))
    return error_mark_node;
  /* __builtin_launder only accepts pointers.  */
  if (!type_dependent_expression_p (arg)
      && !TYPE_PTR_P (TREE_TYPE (arg)))
    {
      error_at (loc, "non-pointer argument to %<__builtin_launder%>");
      return error_mark_node;
    }
  if (processing_template_decl)
    arg = orig_arg;
  return build_call_expr_internal_loc (loc, IFN_LAUNDER,
				       TREE_TYPE (arg), 1, arg);
}

/* Finish __builtin_convertvector (arg, type).  */

tree
cp_build_vec_convert (tree arg, location_t loc, tree type,
		      tsubst_flags_t complain)
{
  if (error_operand_p (type))
    return error_mark_node;
  if (error_operand_p (arg))
    return error_mark_node;

  tree ret = NULL_TREE;
  if (!type_dependent_expression_p (arg) && !dependent_type_p (type))
    ret = c_build_vec_convert (cp_expr_loc_or_input_loc (arg),
			       decay_conversion (arg, complain),
			       loc, type, (complain & tf_error) != 0);

  if (!processing_template_decl)
    return ret;

  return build_call_expr_internal_loc (loc, IFN_VEC_CONVERT, type, 1, arg);
}

#include "gt-cp-semantics.h"
gesummv.c
/** * gesummv.c: This file was adapted from PolyBench/GPU 1.0 test * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <omp.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include "../../common/polybenchUtilFuncts.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU 1 /* Problem size */ #define N 8192 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y) { for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void gesummv_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y) { #pragma omp target device(GPU) map(to : A[:N * N], B[:N * N], x[:N]) \ map(from : y[:N]) #pragma omp parallel for for (int i = 0; i < N; i++) { DATA_TYPE tmp = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp = A[i * N + j] * x[j] + tmp; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp + BETA * y[i]; } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE)i) / N; for (j = 0; j < N; j++) { A[i * N + j] = ((DATA_TYPE)i * j) / N; B[i * N + j] = ((DATA_TYPE)i * j) / N; } } } void compareResults(DATA_TYPE *y, DATA_TYPE *y_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < (N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > 
PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *x; DATA_TYPE *y; DATA_TYPE *y_outputFromGpu; A = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE)); x = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); y = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); fprintf(stdout, "<< Scalar, Vector and Matrix Multiplication >>\n"); init(A, B, x); t_start = rtclock(); gesummv_OMP(A, B, x, y_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); init(A, B, x); t_start = rtclock(); gesummv(A, B, x, y); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); return 0; }
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/

#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/gem.h"
#include "magick/gem-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"

/*
  Typedef declarations.
*/
/* One tristimulus entry of a colorspace transform lookup table; x, y and z
   hold the per-channel contributions for a single primary.  */
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C o l o r s p a c e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageColorspaceType() returns the potential colorspace of image:
%  sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
%  To ensure the image type matches its potential, use SetImageColorspaceType():
%
%    (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
%      exception);
%
%  The format of the GetImageColorspaceType method is:
%
%      ColorspaceType GetImageColorspaceType(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Start from the declared colorspace, then downgrade to GRAY when the
     pixel data is demonstrably grayscale (or bilevel).  */
  colorspace=image->colorspace;
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleMatteType))
    colorspace=GRAYColorspace;
  return(colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalized the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the RGBTransformImage method is:
%
%      MagickBooleanType RGBTransformImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
% */ static inline void ConvertRGBToCMY(const Quantum red,const Quantum green, const Quantum blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static void ConvertRGBToLab(const Quantum red,const Quantum green, const Quantum blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const Quantum red,const Quantum green, const Quantum blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLuv(const Quantum red,const Quantum green, const Quantum blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const Quantum red,const Quantum green, const Quantum blue,double *low_x,double *low_y,double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); gamma=PerceptibleReciprocal(X+Y+Z); *low_x=gamma*X; *low_y=gamma*Y; *cap_Y=Y; } static void ConvertRGBToYPbPr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *U,double *V) { 
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static void ConvertRGBToYDbDr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } MagickExport MagickBooleanType RGBTransformImage(Image *image, const ColorspaceType colorspace) { #define RGBTransformImageTag "RGBTransform/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); pixel.red=(MagickRealType) pixel.red; pixel.green=(MagickRealType) pixel.green; pixel.blue=(MagickRealType) pixel.blue; ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to HSI. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) GetPixelBlue(q)); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { 
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); 
blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,logmap[ScaleQuantumToMap(red)]); SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]); SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601LumaColorspace: { /* Initialize Rec601 luma tables: G = 0.298839*R+0.586811*G+0.114350*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (0.298839*(double) i); y_map[i].y=(MagickRealType) (0.586811*(double) i); z_map[i].y=(MagickRealType) (0.114350*(double) i); x_map[i].z=(MagickRealType) (0.298839*(double) i); y_map[i].z=(MagickRealType) (0.586811*(double) i); z_map[i].z=(MagickRealType) (0.114350*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.212656*R+0.715158*G+0.072186*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (0.212656*(double) i); y_map[i].y=(MagickRealType) (0.715158*(double) i); z_map[i].y=(MagickRealType) (0.072186*(double) i); x_map[i].z=(MagickRealType) (0.212656*(double) i); y_map[i].z=(MagickRealType) (0.715158*(double) i); z_map[i].z=(MagickRealType) (0.072186*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; y_map[i].x=0.010566*i; z_map[i].x=0.002052*i; x_map[i].y=(-0.003296)*i; y_map[i].y=(-0.006471)*i; z_map[i].y=0.009768*i; x_map[i].z=0.009410*i; y_map[i].z=(-0.007880)*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetPixelRed(q,ScaleMapToQuantum(pixel.red)); SetPixelGreen(q,ScaleMapToQuantum(pixel.green)); SetPixelBlue(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RGBTransformImage) #endif proceed=SetImageProgress(image,RGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  ImageType
    target_type;

  MagickBooleanType
    sync;

  /*
    Stamp the image with the requested colorspace and reset the
    colorspace-derived metadata (gamma, rendering intent, chromaticity)
    to defaults appropriate for that space, then synchronize the pixel
    cache morphology.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already tagged: nothing to do */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  target_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Gray targets force a grayscale image type; linear gray carries no
        encoding gamma.
      */
      target_type=GrayscaleType;
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.0;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.0;  /* linear colorspaces */
    else
      {
        /*
          Non-linear target: install default primaries and white point.
          NOTE(review): the values match the familiar sRGB/D65 primaries --
          confirm against the project's colorimetry reference.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  sync=SyncImagePixelCache(image,&image->exception);
  image->type=target_type;
  return(sync);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *value;

  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* already classified as a gray type: nothing to scan */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  /* only gray or sRGB-compatible pixels can be tested channel-for-channel */
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  /* the "colorspace:auto-grayscale" property acts as an opt-out switch */
  value=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(value) == MagickFalse)
    return(MagickFalse);
  /*
    Scan every pixel: start assuming bi-level; demote to grayscale on the
    first gray-but-not-monochrome pixel, and abandon (UndefinedType) on the
    first non-gray pixel.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;  /* pixel cache failure: keep whatever type was established */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /* all pixels are gray: retag the image and refresh the pixel cache */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    image->type=GrayscaleMatteType;  /* preserve the alpha channel in the type */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n
o c h r o m e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  MagickBooleanType
    bilevel;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Inspect every pixel; if each one is pure black or pure white, retag the
    image as a bi-level grayscale image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* nothing to prove */
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  /* honor the "colorspace:auto-grayscale" opt-out property */
  value=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(value) == MagickFalse)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (bilevel != MagickFalse) && (y < (ssize_t) image->rows); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;  /* cache failure ends the scan without disproving bi-level */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (bilevel == MagickFalse)
    return(MagickFalse);
  /* every inspected pixel is monochrome: commit the new classification */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already in the requested colorspace */
  /* embedded color profiles no longer describe the transformed pixels */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  /* gray requests are delegated to GrayscaleImage() */
  if (colorspace == LinearGRAYColorspace)
    return(GrayscaleImage(image,Rec709LuminancePixelIntensityMethod));
  if (colorspace == GRAYColorspace)
    return(GrayscaleImage(image,Rec709LumaPixelIntensityMethod));
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,image->colorspace));
  status=MagickTrue;
  /* first normalize the source to sRGB if it is not already there */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformRGBImage(image,image->colorspace);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m R G B I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/* CMY is the subtractive complement of RGB: each channel is 1-component. */
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(1.0-cyan));
  *green=ClampToQuantum(QuantumRange*(1.0-magenta));
  *blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}

/*
  LMS cone response back to CIE XYZ via a fixed 3x3 inverse matrix.
  NOTE(review): matrix presumably inverts the XYZ->LMS matrix used by the
  forward transform elsewhere in this file -- confirm the pair stays in sync.
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/* LMS -> XYZ -> RGB, composing the two helpers above/below. */
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Undo the [0,1] channel normalization (L scaled by 100, u mapped to
  [-134,220], v mapped to [-140,122]) before the CIE Luv -> XYZ conversion.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Clamp to the YCC lookup-table domain [0,1388] and round to nearest. */
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  Undo the [0,1] normalization (L scaled by 100; a and b biased by 0.5 and
  scaled by 255) before the CIE Lab -> XYZ conversion.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Chromaticity (x,y) plus luminance Y back to XYZ: X=x*(Y/y),
  Z=(1-x-y)*(Y/y).  PerceptibleReciprocal() guards division by a zero y.
*/
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  gamma=PerceptibleReciprocal(low_y);
  X=gamma*cap_Y*low_x;
  Y=cap_Y;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Inverse YPbPr transform; the chroma channels are stored biased by 0.5 so
  they fit the [0,1] range (see the matching forward transform).
*/
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}

/* Normalized YCbCr uses the same arithmetic as YPbPr here. */
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/* Inverse YDbDr transform; Db and Dr are stored biased by 0.5. */
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5)));
}

/* Inverse YIQ transform; I and Q are stored biased by 0.5. */
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
    0.6210244164652610754*(Q-0.5)));
*green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)- 0.6473805968256950427*(Q-0.5))); *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+ 1.7046149983646481374*(Q-0.5))); } static void ConvertYUVToRGB(const double Y,const double U,const double V, Quantum *red,Quantum *green,Quantum *blue) { *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+ 1.1398279671717170825*(V-0.5))); *green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)- 0.5805003156565656797*(V-0.5))); *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)- 4.813762626262513e-04*(V-0.5))); } MagickExport MagickBooleanType TransformRGBImage(Image *image, const ColorspaceType colorspace) { #define TransformRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 
0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 
0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 
0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 
0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 
0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 
0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: 
case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { 
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=ClampToQuantum(QuantumRange*X); green=ClampToQuantum(QuantumRange*Y); blue=ClampToQuantum(QuantumRange*Z); break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { 
status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))])); green=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))])); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))])); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == 
MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: 
{ /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
edge_data_c2c.h
/* ============================================================================== KratosPFEMApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */
//
//   Project Name:        Kratos
//   Last Modified by:    $Author: antonia $
//   Date:                $Date: 2009-01-14 08:26:51 $
//   Revision:            $Revision: 1.11 $
//
//

#if !defined(KRATOS_EDGE_DATA_C2C_H_INCLUDED )
#define  KRATOS_EDGE_DATA_C2C_H_INCLUDED

// We suggest defining the following macro: scalar convection terms are then
// assembled in conservative form (div(a*phi)) instead of convective form (a.grad(phi)).
#define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
// We suggest defining the following macro (currently disabled: the vector
// convection below uses a hard-coded skew form instead — see
// Add_ConvectiveContribution).
// #define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "free_surface_application.h"
#include "utilities/openmp_utils.h"

namespace Kratos
{

//     template<unsigned int TDim>
//     class EdgeConstructionScratch
//     {
//     public:
//         array_1d<double, TDim+1> N;
//         boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim> dN_dx;
//         double volume;
//         double weighting_factor = 1.0 / static_cast<double>(TDim+1);
//         boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> mass_consistent;
//         array_1d<double, TDim+1> mass_lumped;
//         array_1d<unsigned int, TDim+1> nodal_indices;
//         array_1d<double, TDim+1> heights;
//
//     }

// Structure definition for fast access to edge data using CSR format.
// One instance stores all edge-based finite-element operators of edge ij:
// consistent mass, full laplacian block, gradient and transposed gradient,
// plus inline helpers to add/subtract the corresponding contributions to a
// nodal right-hand side.
template<unsigned int TDim>
class EdgesStructureTypeC2C
{
public:
    // component ij of the consistent mass matrix (M = Ni * Nj * dOmega)
    double Mass;
    // components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega)
    //double Laplacian;
    boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ;
    // components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega)
    array_1d<double, TDim> Ni_DNj;
    // components k of the transposed gradient matrix of edge ij
    // (GT = dNi/dxl * Nj * dOmega) — TRANSPOSED GRADIENT
    array_1d<double, TDim> DNi_Nj;

    //*************************************************************************************
    //*************************************************************************************
    // Gradient integrated by parts:
    // RHSi += DNi_Nj pj + Aboundary * pext  ==>  RHS += Ni_DNj p_j - DNi_Nj p_i
    // ATTENTION: + Aboundary * pext is NOT included!! it should be included "manually"
    inline void Add_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
    }

    // Same operator with opposite sign (subtracts the integrated-by-parts gradient).
    inline void Sub_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
    }

    //*************************************************************************************
    //*************************************************************************************
    // Divergence operator (edge difference form):
    // RHSi += Ni_DNj[k] * (v_j[k] - v_i[k])
    inline void Add_D_v(double& destination,
                        const array_1d<double, TDim>& v_i,
                        const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
    }

    // Same as Add_D_v with opposite sign.
    inline void Sub_D_v(double& destination,
                        const array_1d<double, TDim>& v_i,
                        const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
    }

    //*************************************************************************************
    //*************************************************************************************
    // Gradient (edge difference form):
    // RHSi += Ni_DNj * (p_j - p_i)
    inline void Add_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] += Ni_DNj[comp] * (p_j - p_i);
    }

    // Same as Add_grad_p with opposite sign.
    inline void Sub_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination[comp] -= Ni_DNj[comp] * (p_j - p_i);
    }

    //*************************************************************************************
    //*************************************************************************************
    // Divergence using the transposed gradient for the i-side:
    // RHSi -= Ni_DNj[k]*v_j[k] - DNi_Nj[k]*v_i[k]
    // NOTE(review): sign convention mirrors Add_Gp (integrated by parts); the
    // "Add_"/"Sub_" naming refers to the operator contribution, not to += / -=.
    inline void Add_div_v(double& destination,
                          const array_1d<double, TDim>& v_i,
                          const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
    }

    // Same as Add_div_v with opposite sign.
    inline void Sub_div_v(double& destination,
                          const array_1d<double, TDim>& v_i,
                          const array_1d<double, TDim>& v_j)
    {
        for (unsigned int comp = 0; comp < TDim; comp++)
            destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
    }

    //*************************************************************************************
    //*************************************************************************************
    // Gets the trace of the laplacian matrix (scalar laplacian of edge ij).
    inline void CalculateScalarLaplacian(double& l_ij)
    {
        l_ij = LaplacianIJ(0, 0);
        for (unsigned int comp = 1; comp < TDim; comp++)
            l_ij += LaplacianIJ(comp, comp);
    }

    // Vector convection contribution:
    // destination[l] += (a_j . Ni_DNj) U_j[l] - (a_i . DNi_Nj) U_i[l]
    // (skew-like form; the alternative convective/conservative forms are kept
    // below, commented out, for reference).
    inline void Add_ConvectiveContribution(array_1d<double, TDim>& destination,
                                           const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
                                           const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
        // #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        //     double temp = a_i[0] * Ni_DNj[0];
        //     for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        //         temp += a_i[k_comp] * Ni_DNj[k_comp];
        //     for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //         destination[l_comp] += temp * (U_j[l_comp] - U_i[l_comp]);
        // #else
        //     double aux_i = a_i[0] * Ni_DNj[0];
        //     double aux_j = a_j[0] * Ni_DNj[0];
        //     for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        //     {
        //         aux_i += a_i[k_comp] * Ni_DNj[k_comp];
        //         aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        //     }
        //     for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //         destination[l_comp] += aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
        // #endif
        // for (unsigned int comp = 0; comp < TDim; comp++)
        //     destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;

        double second = a_i[0] * DNi_Nj[0];
        double first = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            second += a_i[k_comp] * DNi_Nj[k_comp];
            first += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += first * U_j[l_comp] - second * U_i[l_comp];
    }

    // Same vector convection contribution with opposite sign.
    inline void Sub_ConvectiveContribution(array_1d<double, TDim>& destination,
                                           const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
                                           const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
        // #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        //     double temp = a_i[0] * Ni_DNj[0];
        //     for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        //         temp += a_i[k_comp] * Ni_DNj[k_comp];
        //     for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //         destination[l_comp] -= temp * (U_j[l_comp] - U_i[l_comp]);
        // #else
        //     double aux_i = a_i[0] * Ni_DNj[0];
        //     double aux_j = a_j[0] * Ni_DNj[0];
        //     for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        //     {
        //         aux_i += a_i[k_comp] * Ni_DNj[k_comp];
        //         aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        //     }
        //     for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //         destination[l_comp] -= aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
        // #endif

        double second = a_i[0] * DNi_Nj[0];
        double first = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            second += a_i[k_comp] * DNi_Nj[k_comp];
            first += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= first * U_j[l_comp] - second * U_i[l_comp];
    }

    // Scalar convection contribution (subtracting form). Conservative or
    // convective form selected at compile time by
    // USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION.
    inline void Sub_ConvectiveContribution(double& destination,
                                           const array_1d<double, TDim>& a_i, const double& phi_i,
                                           const array_1d<double, TDim>& a_j, const double& phi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        destination -= temp * (phi_j - phi_i);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        destination -= aux_j * phi_j - aux_i * phi_i;
#endif
        // double second = a_i[0] * DNi_Nj[0];
        // double first = a_j[0] * Ni_DNj[0];
        // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        // {
        //     second += a_i[k_comp] * DNi_Nj[k_comp];
        //     first += a_j[k_comp] * Ni_DNj[k_comp];
        // }
        // destination -= first * phi_j - second * phi_i;
    }

    // Scalar convection contribution (adding form); see Sub_ConvectiveContribution.
    inline void Add_ConvectiveContribution(double& destination,
                                           const array_1d<double, TDim>& a_i, const double& phi_i,
                                           const array_1d<double, TDim>& a_j, const double& phi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = a_i[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        destination += temp * (phi_j - phi_i);
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        destination += aux_j * phi_j - aux_i * phi_i;
#endif
        // double second = a_i[0] * DNi_Nj[0];
        // double first = a_j[0] * Ni_DNj[0];
        // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        // {
        //     second += a_i[k_comp] * DNi_Nj[k_comp];
        //     first += a_j[k_comp] * Ni_DNj[k_comp];
        // }
        // destination += first * phi_j - second * phi_i;
    }

    //*************************************************************************************
    //*************************************************************************************
    // Low-order convection stabilization (vector):
    // stab_low[l] = (a_i^T L a_i) * (U_j[l] - U_i[l])
    inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim>& stab_low,
            const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
            const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
    {
        double conv_stab = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);

        // double temp = 0.0;
        // double lij = 0.0;
        // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        // {
        //     lij += LaplacianIJ(k_comp,k_comp);
        //     temp = a_i[k_comp] * a_i[k_comp];
        // }
        //
        // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //     stab_low[l_comp] = temp * lij * (U_j[l_comp] - U_i[l_comp]);
    }

    // inline void CalculateConvectionStabilization_LOW( array_1d<double,TDim>& stab_low,
    //         const array_1d<double,TDim>& a_i, const array_1d<double,TDim>& U_i, const double& p_i,
    //         const array_1d<double,TDim>& a_j, const array_1d<double,TDim>& U_j, const double& p_j
    //         )
    // {
    //     double conv_stab = 0.0;
    //     for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
    //         for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
    //             conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp,m_comp);
    //     for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
    //         stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);
    //
    //     //// adding pressure
    //     double press_diff = p_j-p_i;
    //     for (unsigned int j_comp = 0; j_comp < TDim; j_comp++)
    //     {
    //         for (unsigned int i_comp = 0; i_comp < TDim; i_comp++)
    //             stab_low[j_comp] -= a_i[i_comp] * LaplacianIJ(i_comp,j_comp) * press_diff ;
    //     }
    //
    //
    // }

    // Low-order convection stabilization (scalar):
    // stab_low = (a_i^T L a_i) * (phi_j - phi_i)
    inline void CalculateConvectionStabilization_LOW(double& stab_low,
            const array_1d<double, TDim>& a_i, const double& phi_i,
            const array_1d<double, TDim>& a_j, const double& phi_j)
    {
        double conv_stab = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
                conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
        stab_low = conv_stab * (phi_j - phi_i);
    }

    //*************************************************************************************
    //*************************************************************************************
    // High-order convection stabilization (vector), built from the projected
    // convective residual pi. Form selected by
    // USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION.
    inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim>& stab_high,
            const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& pi_i,
            const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& pi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
        double temp = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); //check if the minus sign is correct

        // double temp_i = 0.0;
        // double temp_j = 0.0;
        // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        // {
        //     temp_j += a_i[k_comp] * Ni_DNj[k_comp];
        //     temp_i += a_i[k_comp] * DNi_Nj[k_comp];
        // }
        // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //     stab_high[l_comp] = +(temp_j*pi_j[l_comp] - temp_i*pi_i[l_comp]); //check if the minus sign is correct

        // double temp_i = 0.0;
        // double temp_j = 0.0;
        // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        // {
        //     temp_i += a_i[k_comp] * Ni_DNj[k_comp];
        //     temp_j += a_i[k_comp] * DNi_Nj[k_comp];
        // }
        // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        //     stab_high[l_comp] = (temp_j*pi_j[l_comp] + temp_i*pi_i[l_comp]); //check if the minus sign is correct
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]);
#endif
    }

    // High-order convection stabilization (scalar); see the vector overload.
    inline void CalculateConvectionStabilization_HIGH(double& stab_high,
            const array_1d<double, TDim>& a_i, const double& pi_i,
            const array_1d<double, TDim>& a_j, const double& pi_j)
    {
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
        double temp = 0.0;
        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
            temp += a_i[k_comp] * Ni_DNj[k_comp];

        stab_high = -temp * (pi_j - pi_i); //check if the minus sign is correct
#else
        double aux_i = a_i[0] * Ni_DNj[0];
        double aux_j = a_j[0] * Ni_DNj[0];
        for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        {
            aux_i += a_i[k_comp] * Ni_DNj[k_comp];
            aux_j += a_j[k_comp] * Ni_DNj[k_comp];
        }
        stab_high = -(aux_j * pi_j - aux_i * pi_i);
#endif
    }

    //*************************************************************************************
    //*************************************************************************************
    // Blend low- and high-order stabilization: destination += tau*(stab_low - beta*stab_high).
    inline void Add_StabContribution(array_1d<double, TDim>& destination,
                                     const double tau, const double beta,
                                     const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
    {
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
    }

    // Scalar overload of Add_StabContribution.
    inline void Add_StabContribution(double& destination,
                                     const double tau, const double beta,
                                     const double& stab_low, const double& stab_high)
    {
        destination += tau * (stab_low - beta * stab_high);
    }

    // Same blend with opposite sign.
    inline void Sub_StabContribution(array_1d<double, TDim>& destination,
                                     const double tau, const double beta,
                                     const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
    {
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
    }

    // Scalar overload of Sub_StabContribution.
    inline void Sub_StabContribution(double& destination,
                                     const double tau, const double beta,
                                     const double& stab_low, const double& stab_high)
    {
        destination -= tau * (stab_low - beta * stab_high);
    }

    //*************************************************************************************
    //*************************************************************************************
    // Viscous (laplacian) contribution: destination += nu_i * trace(L) * (U_j - U_i).
    // NOTE(review): only nu_i is used; the averaged viscosity variant is kept
    // commented out below — nu_j is currently unused by design, confirm intended.
    inline void Add_ViscousContribution(array_1d<double, TDim>& destination,
                                        const array_1d<double, TDim>& U_i, const double& nu_i,
                                        const array_1d<double, TDim>& U_j, const double& nu_j)
    {
        // calculate scalar laplacian
        double L = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            L += LaplacianIJ(l_comp, l_comp);

        //double nu_avg = 0.5*(nu_i+nu_j);

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]);
    }

    // Same viscous contribution with opposite sign.
    inline void Sub_ViscousContribution(array_1d<double, TDim>& destination,
                                        const array_1d<double, TDim>& U_i, const double& nu_i,
                                        const array_1d<double, TDim>& U_j, const double& nu_j)
    {
        // calculate scalar laplacian
        double L = 0.0;
        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            L += LaplacianIJ(l_comp, l_comp);

        //double nu_avg = 0.5*(nu_i+nu_j);

        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
            destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]);
    }
};

// Class definition of matrices using CSR format. Holds the full edge-based
// data structure of the mesh: one EdgesStructureTypeC2C tuple per directed
// edge, CSR indexing arrays, and node-based vectors (lumped/inverted mass,
// diagonal gradient, minimal nodal height).
template<unsigned int TDim, class TSparseSpace>
class MatrixContainerC2C
{
public:
    // name for the self defined structure
    typedef EdgesStructureTypeC2C<TDim> CSR_Tuple;
    typedef vector<CSR_Tuple> EdgesVectorType;
    // name for row start and column index vectors
    typedef vector<unsigned int> IndicesVectorType;
    // names for separately stored node based values
    typedef vector<double> ValuesVectorType;
    // typedef std::vector< array_1d<double,TDim> > CalcVectorType;
    typedef vector< array_1d<double, TDim> > CalcVectorType;

    // constructor and destructor
    MatrixContainerC2C()
    {
    };

    ~MatrixContainerC2C()
    {
    };

    // functions to return private values
    inline unsigned int GetNumberEdges()
    {
        return mNumberEdges;
    }

    inline EdgesVectorType& GetEdgeValues()
    {
        return mNonzeroEdgeValues;
    }

    inline IndicesVectorType& GetColumnIndex()
    {
        return mColumnIndex;
    }

    inline IndicesVectorType& GetRowStartIndex()
    {
        return mRowStartIndex;
    }

    inline ValuesVectorType& GetLumpedMass()
    {
        return mLumpedMassMatrix;
    }

    inline ValuesVectorType& GetInvertedMass()
    {
        return mInvertedMassMatrix;
    }

    inline CalcVectorType& GetDiagGradient()
    {
        return
mDiagGradientMatrix;
    }

    inline ValuesVectorType& GetHmin()
    {
        return mHmin;
    }

    //********************************************************
    // Function to size and initialize the vector of CSR tuples.
    // Counts edges from the NEIGHBOUR_NODES lists, assigns every node a global
    // index in AUX_INDEX, allocates all CSR and node-based arrays, and fills
    // the row-start / column-index structure (neighbours sorted per row).
    void ConstructCSRVector(ModelPart& model_part)
    {
        KRATOS_TRY

        // SIZE OF CSR VECTOR
        // defining the number of nodes and edges
        int n_nodes = model_part.Nodes().size();
        // remark: no colouring algorithm is used here (symmetry is neglected)
        //         respectively edge ij is considered different from edge ji
        mNumberEdges = 0;
        // counter to assign and get global nodal index
        int i_node = 0;

        // counting the edges connecting the nodes
        for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++)
        {
            // counting neighbours of each node
            mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size();
            // DIAGONAL TERMS
            //mNumberEdges++;

            // assigning global index to each node
            node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double> (i_node++);
        }
        // error message in case number of nodes does not coincide with number of indices
        if (i_node != n_nodes)
            KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!");

        // allocating memory for block of CSR data - setting to zero for
        // first-touch OpenMP allocation
        mNonzeroEdgeValues.resize(mNumberEdges);
        //SetToZero(mNonzeroEdgeValues);
        mColumnIndex.resize(mNumberEdges);
        //SetToZero(mColumnIndex);
        mRowStartIndex.resize(n_nodes + 1);
        //SetToZero(mRowStartIndex);
        mLumpedMassMatrix.resize(n_nodes);
        SetToZero(mLumpedMassMatrix);
        mInvertedMassMatrix.resize(n_nodes);
        SetToZero(mInvertedMassMatrix);
        mDiagGradientMatrix.resize(n_nodes);
        SetToZero(mDiagGradientMatrix);
        mHmin.resize(n_nodes);
        SetToZero(mHmin);

        // INITIALIZING OF THE CSR VECTOR
        // temporary variable as the row start index of a node depends on the
        // number of neighbours of the previous one.
        // NOTE(review): row_start_temp is shared across the k-loop below; the
        // "parallel region + if(ThisThread()==k)" construct makes thread k do
        // its partition while the others idle, so the accumulation stays
        // sequential (first-touch page placement is the point) — confirm this
        // matches the intended OpenMP semantics before refactoring.
        unsigned int row_start_temp = 0;

        int number_of_threads = OpenMPUtils::GetNumThreads();
        std::vector<int> row_partition(number_of_threads);
        OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition);

        for (int k = 0; k < number_of_threads; k++)
        {
            #pragma omp parallel
            if (OpenMPUtils::ThisThread() == k)
            {
                for (unsigned int aux_i = static_cast<unsigned int> (row_partition[k]); aux_i < static_cast<unsigned int> (row_partition[k + 1]); aux_i++)
                {
                    typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i;

                    // main loop over all nodes
                    // for (typename ModelPart::NodesContainerType::iterator node_it=model_part.NodesBegin(); node_it!=model_part.NodesEnd(); node_it++)
                    // {

                    // getting the global index of the node
                    i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));

                    // determining its neighbours
                    GlobalPointersVector< Node < 3 > >& neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES);
                    // number of neighbours of node i determines row start index for the following node
                    unsigned int n_neighbours = neighb_nodes.size();
                    // DIAGONAL TERMS
                    //n_neighbours++;

                    // reserving memory for work array
                    std::vector<unsigned int> work_array;
                    work_array.reserve(n_neighbours);
                    // DIAGONAL TERMS
                    //work_array.push_back(i_node);

                    // nested loop over the neighbouring nodes
                    for (GlobalPointersVector< Node < 3 > >::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++)
                    {
                        // getting global index of the neighbouring node
                        work_array.push_back(static_cast<unsigned int> (neighb_it->FastGetSolutionStepValue(AUX_INDEX)));
                    }
                    // reordering neighbours following their global indices
                    std::sort(work_array.begin(), work_array.end());

                    // setting current row start index
                    mRowStartIndex[i_node] = row_start_temp;
                    // nested loop over the by now ordered neighbours
                    for (unsigned int counter = 0; counter < n_neighbours; counter++)
                    {
                        // getting global index of the neighbouring node
                        unsigned int j_neighbour = work_array[counter];
                        // calculating CSR index
                        unsigned int csr_index = mRowStartIndex[i_node] + counter;

                        // saving column index j of the original matrix
                        mColumnIndex[csr_index] = j_neighbour;

                        // initializing the CSR vector entries with zero
                        mNonzeroEdgeValues[csr_index].Mass = 0.0;
                        //mNonzeroEdgeValues[csr_index].Laplacian = 0.0;
                        noalias(mNonzeroEdgeValues[csr_index].LaplacianIJ) = ZeroMatrix(TDim, TDim);
                        noalias(mNonzeroEdgeValues[csr_index].Ni_DNj) = ZeroVector(TDim);
                        // TRANSPOSED GRADIENT
                        noalias(mNonzeroEdgeValues[csr_index].DNi_Nj) = ZeroVector(TDim);
                    }
                    // preparing row start index for next node
                    row_start_temp += n_neighbours;
                }
            }
        }
        // adding last entry (necessary for abort criterion of loops)
        mRowStartIndex[n_nodes] = mNumberEdges;

        // INITIALIZING NODE BASED VALUES
        // lumped mass matrix (elements Mi)
        /* #pragma omp parallel for
        for (int i_node=0; i_node<n_nodes; i_node++)
            mLumpedMassMatrix[i_node] = 0.0;*/

        // set the heights to a huge number
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
            mHmin[i_node] = 1e10;

        // diagonal of gradient matrix (elements Gii)
        // #pragma omp parallel for
        // for (int i_node=0; i_node<n_nodes; i_node++)
        //     noalias(mDiagGradientMatrix[i_node]) = ZeroVector(TDim);

        KRATOS_CATCH("")
    }

    //*********************************
    // Function to precalculate CSR data.
    // Loops over all elements, computes the elemental mass/laplacian/gradient
    // contributions from the shape function derivatives, and assembles them
    // into the edge tuples and the node-based vectors (lumped mass, inverted
    // mass, diagonal gradient, minimal nodal height).
    void BuildCSRData(ModelPart& model_part)
    {
        KRATOS_TRY

        // PRECALCULATING CSR DATA
        // defining temporary local variables for elementwise addition
        // shape functions
        array_1d<double, TDim + 1 > N;
        // shape function derivatives
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim> dN_dx;
        // volume
        double volume;
        // weighting factor
        double weighting_factor = 1.0 / static_cast<double> (TDim + 1);
        // elemental matrices
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim + 1 > mass_consistent;
        //boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> laplacian;
        array_1d<double, TDim + 1 > mass_lumped;
        // global indices of elemental nodes
        array_1d<unsigned int, TDim + 1 > nodal_indices;

        array_1d<double, TDim + 1 > heights;

        // loop over all elements
        for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
        {
            // LOCAL ELEMENTWISE CALCULATIONS
            // getting geometry data of the element
            GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);

            // calculate length of the heights of the element
            // (1/|grad Ni| is the distance of node i to the opposite face)
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
                for (unsigned int comp = 1; comp < TDim; comp++)
                {
                    heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
                }
                heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
                // KRATOS_WATCH(heights);
            }

            // setting up elemental mass matrices
            CalculateMassMatrix(mass_consistent, volume);
            noalias(mass_lumped) = ZeroVector(TDim + 1);
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    //mass_consistent(ie_node,je_node) = N(ie_node) * N(je_node) * volume;
                    mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
                }
                //mass_lumped[ie_node] = volume * N[ie_node];
            }

            /*OLD DATA STRUCTURE
            //calculating elemental laplacian matrix
            noalias(laplacian) = ZeroMatrix(TDim+1,TDim+1);
            for (unsigned int ie_node=0; ie_node<=TDim; ie_node++)
                for (unsigned int je_node=ie_node+1; je_node<=TDim; je_node++)
                    //componentwise multiplication
                    for (unsigned int component=0; component<TDim; component++)
                    {
                        //taking advantage of symmetry
                        double temp = dN_dx(ie_node,component) * dN_dx(je_node,component) * volume;
                        laplacian(ie_node,je_node) += temp;
                        laplacian(je_node,ie_node) += temp;
                    }

            //multiply gradient with volume referring to each gauss point
            dN_dx *= (volume / double(TDim+1));*/

            // (corresponding to Ni * dOmega respectively Nj * dOmega)
            double weighted_volume = volume * weighting_factor;

            // ASSEMBLING GLOBAL DATA STRUCTURE
            // loop over the nodes of the element to determine their global indices
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                nodal_indices[ie_node] = static_cast<unsigned int> (elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));

            // assembling global "edge matrices" by adding local contributions
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                // check the heights and change the value if minimal is found
                if (mHmin[ nodal_indices[ie_node] ] > heights[ie_node])
                    mHmin[ nodal_indices[ie_node] ] = heights[ie_node];

                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    // remark: there is no edge linking node i with itself!
                    // DIAGONAL TERMS
                    if (ie_node != je_node)
                    {
                        // calculating CSR index from global index
                        unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);

                        // assigning precalculated element data to the referring edges
                        // contribution to edge mass
                        mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);

                        // contribution to edge laplacian
                        /*OLD DATA STRUCTURE
                        mNonzeroEdgeValues[csr_index].Laplacian = laplacian(ie_node,je_node);*/
                        boost::numeric::ublas::bounded_matrix <double, TDim, TDim>& laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
                                laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;

                        // contribution to edge gradient
                        array_1d<double, TDim>& gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            //gradient[l_comp] += dN_dx(je_node,l_comp);
                            gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;

                        // TRANSPOSED GRADIENT
                        // contribution to transposed edge gradient
                        array_1d<double, TDim>& transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            //transp_gradient[l_comp] += dN_dx(ie_node,l_comp);
                            transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
                    }
                }
            }

            // assembling node based vectors
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                // diagonal of the global lumped mass matrix
                mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                // diagonal of the global gradient matrix
                array_1d<double, TDim>& gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
                for (unsigned int component = 0; component < TDim; component++)
                    //gradient[component] += dN_dx(ie_node,component);
                    gradient[component] += dN_dx(ie_node, component) * weighted_volume;
            }
        }

        // copy mass matrix to inverted mass matrix
        for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
        }

        // perform MPI synchronization between the domains

        // calculating inverted mass matrix (this requires synchronization for
        // MPI parallelism)
        for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
        }

        KRATOS_CATCH("")
    }

    //******************************************
    // Function to calculate CSR index of edge ij.
    // NOTE(review): if j is not a neighbour of i the loop runs off the end of
    // row i and returns mRowStartIndex[NodeI + 1] — callers are expected to
    // pass valid edges only.
    unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        // index indicating data position of edge ij
        unsigned int csr_index;
        // searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        // returning CSR index of edge ij
        return csr_index;

        KRATOS_CATCH("")
    }

    //***********************************************
    // Function to get pointer to CSR tuple of edge ij (same lookup as GetCSRIndex).
    CSR_Tuple* GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        // index indicating data position of edge ij
        unsigned int csr_index;
        // searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        // returning pointer to CSR tuple of edge ij
        return &mNonzeroEdgeValues[csr_index];

        KRATOS_CATCH("")
    }

    //*******************************
    // Function to free dynamic memory.
    void Clear()
    {
        KRATOS_TRY

        mNonzeroEdgeValues.clear();
        mColumnIndex.clear();
        mRowStartIndex.clear();
        mInvertedMassMatrix.clear();
        mLumpedMassMatrix.clear();
        mDiagGradientMatrix.clear();
        mHmin.clear();

        KRATOS_CATCH("")
    }

    //****************************
    // Functions to access database
    // (note that this is already thought for parallel;
    //  for a single processor this could be done in a faster way).
    // Copies nodal coordinates into rDestination, indexed by position i
    // (NOT by AUX_INDEX — see the commented line below).
    void FillCoordinatesFromDatabase(CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        // loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();

        #pragma omp parallel for firstprivate(n_nodes, it_begin)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            // get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            // save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = (*node_it)[component];
        }

        KRATOS_CATCH("");
    }

    //****************************
    // Copies the current step value of a vector variable into rDestination.
    void FillVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        // loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            // get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            // get the requested value in vector form
            array_1d<double, 3 > & vector = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
            // save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = vector[component];
        }

        KRATOS_CATCH("");
    }

    // Copies the PREVIOUS step value (buffer position 1) of a vector variable.
    void FillOldVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        // loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            // get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            // get the requested value in vector form
            array_1d<double, 3 > & vector = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
            // save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = vector[component];
        }

        KRATOS_CATCH("");
    }

    // Copies the current step value of a scalar variable into rDestination.
    void FillScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        // loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            // get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            // get the requested scalar value
            double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
            // save value in the destination vector
            rDestination[i_node] = scalar;
        }

        KRATOS_CATCH("");
    }

    // Copies the PREVIOUS step value (buffer position 1) of a scalar variable.
    void FillOldScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            // get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            // get the requested scalar value
            double& scalar = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
            // save value in the destination vector
            rDestination[i_node] = scalar;
        }

        KRATOS_CATCH("");
    }

    // Writes rOrigin back into the nodal database (vector variable, current step).
    void WriteVectorToDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        // loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            // get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            // get reference of destination
            array_1d<double, 3 > & vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
            // save vector in database
            for (unsigned int component = 0; component < TDim; component++)
                vector[component] = (rOrigin[i_node])[component];
        }

        KRATOS_CATCH("");
    }

    // Writes rOrigin back into the nodal database (scalar variable, current step).
    void WriteScalarToDatabase(Variable<double>& rVariable, ValuesVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        // loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            // get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            int i_node = i;

            // get reference of destination
            double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
            // save scalar in database
            scalar = rOrigin[i_node];
        }

        KRATOS_CATCH("");
    }

    //*********************************************************************
    // destination = origin1 + value * Minv*origin   (vector version)
    void Add_Minv_value(
        CalcVectorType& destination,
        const CalcVectorType& origin1,
        const double value,
        const ValuesVectorType& Minv_vec,
        const CalcVectorType& origin
    )
    {
        KRATOS_TRY

        int loop_size = destination.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& dest = destination[i_node];
            const double m_inv = Minv_vec[i_node];
            const array_1d<double, TDim>& origin_vec1 = origin1[i_node];
            const array_1d<double, TDim>& origin_value = origin[i_node];

            double temp = value * m_inv;
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = origin_vec1[comp] + temp * origin_value[comp];
        }

        KRATOS_CATCH("")
    }

    // destination = origin1 + value * Minv*origin   (scalar version)
    void Add_Minv_value(
        ValuesVectorType& destination,
        const ValuesVectorType& origin1,
        const double value,
        const ValuesVectorType& Minv_vec,
        const ValuesVectorType& origin
    )
    {
        KRATOS_TRY

        int loop_size = destination.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            double& dest = destination[i_node];
            const double m_inv = Minv_vec[i_node];
            const double& origin_vec1 = origin1[i_node];
            const double& origin_value = origin[i_node];

            double temp = value * m_inv;
            dest = origin_vec1 + temp * origin_value;
        }

        KRATOS_CATCH("")
    }

    //**********************************************************************
    // Resize and zero a vector-valued work array (parallel first-touch).
    void AllocateAndSetToZero(CalcVectorType& data_vector, int size)
    {
        data_vector.resize(size);
        int loop_size = size;
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& aaa = data_vector[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                aaa[comp] = 0.0;
        }
    }

    // Resize and zero a scalar-valued work array (parallel first-touch).
    void AllocateAndSetToZero(ValuesVectorType& data_vector, int size)
    {
        data_vector.resize(size);
        int loop_size = size;
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            data_vector[i_node] = 0.0;
            ;
        }
    }

    //**********************************************************************
    // Zero all entries of a vector-valued array in parallel.
    void SetToZero(CalcVectorType& data_vector)
    {
        int loop_size = data_vector.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& aaa = data_vector[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                aaa[comp] = 0.0;
        }
    }

    // Zero all entries of a scalar-valued array in parallel.
    void SetToZero(ValuesVectorType& data_vector)
    {
        int loop_size = data_vector.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            data_vector[i_node] = 0.0;
            ;
        }
    }

    //**********************************************************************
    // Componentwise copy origin -> destination (vector arrays, parallel).
    // NOTE(review): destination is assumed to be already sized to match origin.
    void AssignVectorToVector(const CalcVectorType& origin,
                              CalcVectorType& destination
                             )
    {
        int loop_size = origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            const array_1d<double, TDim>& orig = origin[i_node];
            array_1d<double, TDim>& dest = destination[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = orig[comp];
        }
    }

    // Copy origin -> destination (scalar arrays, parallel).
    void AssignVectorToVector(const ValuesVectorType& origin,
                              ValuesVectorType& destination
                             )
    {
        int loop_size = origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            destination[i_node] = origin[i_node];
        }
    }

private:
    // number of edges
    unsigned int mNumberEdges;

    // CSR data vector for storage of the G, L and consistent M components of edge ij
    EdgesVectorType mNonzeroEdgeValues;

    // vector to store column indices of nonzero matrix elements for each row
    IndicesVectorType mColumnIndex;

    // index vector to access the start of matrix row i in the column vector
    IndicesVectorType mRowStartIndex;

    // inverse of the mass matrix ... for parallel calculation each subdomain
    // should contain this correctly calculated (including contributions of the
    // neighbours)
    ValuesVectorType mInvertedMassMatrix;

    // minimum height around one node
    ValuesVectorType mHmin;

    // lumped mass matrix (separately stored due to lack of diagonal elements
    // of the consistent mass matrix)
    ValuesVectorType mLumpedMassMatrix;

    // diagonal of the gradient matrix (separately stored due to special calculations)
    CalcVectorType mDiagGradientMatrix;

    //*******************************************
    // Functions to set up elemental mass matrices.
    // 2D (linear triangle) consistent mass matrix: V/6 on the diagonal, V/12 off-diagonal.
    void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3 > & mass_consistent, double volume)
    {
        for (unsigned int i_node = 0; i_node <= TDim; i_node++)
        {
            // diagonal terms
            mass_consistent(i_node, i_node) = 0.16666666666666666667 * volume; //1/6
            // non-diagonal terms
            double temp = 0.08333333333333333333 * volume; //1/12
            for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
            {
                // taking advantage of symmetry
                mass_consistent(i_node, j_neighbour) = temp;
                mass_consistent(j_neighbour, i_node) = temp;
            }
        }
    }

    // 3D (linear tetrahedron) consistent mass matrix: V/10 on the diagonal, V/20 off-diagonal.
    void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4 > & mass_consistent, double volume)
    {
        for (unsigned int i_node = 0; i_node <= TDim; i_node++)
        {
            // diagonal terms
            mass_consistent(i_node, i_node) = 0.1 * volume;
            // non-diagonal terms
            double temp = 0.05 * volume;
            for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim;
j_neighbour++) { //taking advantage of symmetry mass_consistent(i_node, j_neighbour) = temp; mass_consistent(j_neighbour, i_node) = temp; } } } }; } //namespace Kratos #endif //KRATOS_EDGE_DATA_C2C_H_INCLUDED defined
// convolution_3x3_pack1to8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 stride-1 convolution, input elempack 1 -> output elempack 8 (AVX).
// Each scalar input pixel is broadcast across a __m256 lane and multiplied
// against 8-wide kernel vectors, accumulating 8 output channels at once.
// Output must be pre-filled with the bias; the per-input-channel pass then
// accumulates into it with load/fmadd/store round-trips.
static void conv3x3s1_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // main pass handles output channels in pairs; the tail loop below handles
    // the remaining odd channel (if any)
    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p + 1);

        // seed the two output channels with their 8-wide bias (or zero)
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
        __m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f);
        out0.fill(_bias0);
        out1.fill(_bias1);

        const float* k0 = kernel.channel(p);
        const float* k1 = kernel.channel(p + 1);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const Mat img0 = bottom_blob.channel(q);

            // three consecutive input rows covered by the 3x3 kernel window
            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            // 9 kernel taps x 8 output channels, for each of the two channels
            __m256 _k00_0 = _mm256_loadu_ps(k0);
            __m256 _k01_0 = _mm256_loadu_ps(k0 + 8);
            __m256 _k02_0 = _mm256_loadu_ps(k0 + 16);
            __m256 _k10_0 = _mm256_loadu_ps(k0 + 24);
            __m256 _k11_0 = _mm256_loadu_ps(k0 + 32);
            __m256 _k12_0 = _mm256_loadu_ps(k0 + 40);
            __m256 _k20_0 = _mm256_loadu_ps(k0 + 48);
            __m256 _k21_0 = _mm256_loadu_ps(k0 + 56);
            __m256 _k22_0 = _mm256_loadu_ps(k0 + 64);

            __m256 _k00_1 = _mm256_loadu_ps(k1);
            __m256 _k01_1 = _mm256_loadu_ps(k1 + 8);
            __m256 _k02_1 = _mm256_loadu_ps(k1 + 16);
            __m256 _k10_1 = _mm256_loadu_ps(k1 + 24);
            __m256 _k11_1 = _mm256_loadu_ps(k1 + 32);
            __m256 _k12_1 = _mm256_loadu_ps(k1 + 40);
            __m256 _k20_1 = _mm256_loadu_ps(k1 + 48);
            __m256 _k21_1 = _mm256_loadu_ps(k1 + 56);
            __m256 _k22_1 = _mm256_loadu_ps(k1 + 64);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 4 output pixels per iteration; input broadcasts are shared
                // between adjacent output pixels where the windows overlap
                for (; j + 3 < outw; j += 4)
                {
                    __m256 _sum00 = _mm256_loadu_ps(outptr0);
                    __m256 _sum10 = _mm256_loadu_ps(outptr1);

                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);

                    _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00);

                    _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10);

                    _mm256_storeu_ps(outptr0, _sum00);
                    _mm256_storeu_ps(outptr1, _sum10);

                    // output pixel j+1: window shifts right by one column
                    __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);

                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);

                    _sum01 = _mm256_comp_fmadd_ps(_r02, _k00_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r03, _k01_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r04, _k02_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r12, _k10_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r13, _k11_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r14, _k12_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r22, _k20_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r23, _k21_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r24, _k22_0, _sum01);

                    _sum11 = _mm256_comp_fmadd_ps(_r02, _k00_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r03, _k01_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r04, _k02_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r12, _k10_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r13, _k11_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r14, _k12_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r22, _k20_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r23, _k21_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r24, _k22_1, _sum11);

                    _mm256_storeu_ps(outptr0 + 8, _sum01);
                    _mm256_storeu_ps(outptr1 + 8, _sum11);

                    // output pixel j+2
                    __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16);
                    __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16);

                    __m256 _r05 = _mm256_broadcast_ss(r0 + 4);
                    __m256 _r15 = _mm256_broadcast_ss(r1 + 4);
                    __m256 _r25 = _mm256_broadcast_ss(r2 + 4);

                    _sum02 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum02);
                    _sum02 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum02);

                    _sum12 = _mm256_comp_fmadd_ps(_r03, _k00_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r15, _k12_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum12);
                    _sum12 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum12);

                    _mm256_storeu_ps(outptr0 + 16, _sum02);
                    _mm256_storeu_ps(outptr1 + 16, _sum12);

                    // output pixel j+3
                    __m256 _r06 = _mm256_broadcast_ss(r0 + 5);
                    __m256 _r16 = _mm256_broadcast_ss(r1 + 5);
                    __m256 _r26 = _mm256_broadcast_ss(r2 + 5);

                    __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24);
                    __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24);

                    _sum03 = _mm256_comp_fmadd_ps(_r04, _k00_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r05, _k01_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r06, _k02_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r14, _k10_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r15, _k11_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r16, _k12_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r24, _k20_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r25, _k21_0, _sum03);
                    _sum03 = _mm256_comp_fmadd_ps(_r26, _k22_0, _sum03);

                    _sum13 = _mm256_comp_fmadd_ps(_r04, _k00_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r05, _k01_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r06, _k02_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r14, _k10_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r15, _k11_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r16, _k12_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r24, _k20_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r25, _k21_1, _sum13);
                    _sum13 = _mm256_comp_fmadd_ps(_r26, _k22_1, _sum13);

                    _mm256_storeu_ps(outptr0 + 24, _sum03);
                    _mm256_storeu_ps(outptr1 + 24, _sum13);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    outptr0 += 32;
                    outptr1 += 32;
                }
                // 2 output pixels per iteration
                for (; j + 1 < outw; j += 2)
                {
                    __m256 _sum00 = _mm256_loadu_ps(outptr0);
                    __m256 _sum10 = _mm256_loadu_ps(outptr1);

                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);

                    _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00);

                    _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10);

                    _mm256_storeu_ps(outptr0, _sum00);
                    _mm256_storeu_ps(outptr1, _sum10);

                    __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8);

                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);

                    _sum01 = _mm256_comp_fmadd_ps(_r02, _k00_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r03, _k01_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r04, _k02_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r12, _k10_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r13, _k11_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r14, _k12_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r22, _k20_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r23, _k21_0, _sum01);
                    _sum01 = _mm256_comp_fmadd_ps(_r24, _k22_0, _sum01);

                    _sum11 = _mm256_comp_fmadd_ps(_r02, _k00_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r03, _k01_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r04, _k02_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r12, _k10_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r13, _k11_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r14, _k12_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r22, _k20_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r23, _k21_1, _sum11);
                    _sum11 = _mm256_comp_fmadd_ps(_r24, _k22_1, _sum11);

                    _mm256_storeu_ps(outptr0 + 8, _sum01);
                    _mm256_storeu_ps(outptr1 + 8, _sum11);

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0 += 16;
                    outptr1 += 16;
                }
                // remaining single output pixels
                for (; j < outw; j++)
                {
                    __m256 _sum00 = _mm256_loadu_ps(outptr0);
                    __m256 _sum10 = _mm256_loadu_ps(outptr1);

                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);

                    _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00);
                    _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00);

                    _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10);
                    _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10);

                    _mm256_storeu_ps(outptr0, _sum00);
                    _mm256_storeu_ps(outptr1, _sum10);

                    r0 += 1;
                    r1 += 1;
                    r2 += 1;
                    outptr0 += 8;
                    outptr1 += 8;
                }

                // advance past the 2 extra columns of the 3x3 window
                // (assumes input row width == outw + 2 — TODO confirm vs caller)
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            // next input channel: kernel has 9 taps x 8 output lanes per channel
            k0 += 9 * 8;
            k1 += 9 * 8;
        }
    }

    // tail pass: one output channel at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
        out0.fill(_bias0);

        const float* k0 = kernel.channel(p);

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0.row(0);

            const Mat img0 = bottom_blob.channel(q);

            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            __m256 _k00 = _mm256_loadu_ps(k0);
            __m256 _k01 = _mm256_loadu_ps(k0 + 8);
            __m256 _k02 = _mm256_loadu_ps(k0 + 16);
            __m256 _k10 = _mm256_loadu_ps(k0 + 24);
            __m256 _k11 = _mm256_loadu_ps(k0 + 32);
            __m256 _k12 = _mm256_loadu_ps(k0 + 40);
            __m256 _k20 = _mm256_loadu_ps(k0 + 48);
            __m256 _k21 = _mm256_loadu_ps(k0 + 56);
            __m256 _k22 = _mm256_loadu_ps(k0 + 64);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                for (; j + 3 < outw; j += 4)
                {
                    __m256 _sum0 = _mm256_loadu_ps(outptr0);

                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);

                    _sum0 = _mm256_comp_fmadd_ps(_r01, _k00, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r02, _k01, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r03, _k02, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r11, _k10, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r12, _k11, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r13, _k12, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r21, _k20, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r22, _k21, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r23, _k22, _sum0);

                    __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);
                    _mm256_storeu_ps(outptr0, _sum0);

                    _sum1 = _mm256_comp_fmadd_ps(_r02, _k00, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r03, _k01, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r04, _k02, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r12, _k10, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r13, _k11, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r14, _k12, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r22, _k20, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r23, _k21, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r24, _k22, _sum1);

                    __m256 _sum2 = _mm256_loadu_ps(outptr0 + 16);
                    __m256 _r05 = _mm256_broadcast_ss(r0 + 4);
                    __m256 _r15 = _mm256_broadcast_ss(r1 + 4);
                    __m256 _r25 = _mm256_broadcast_ss(r2 + 4);
                    _mm256_storeu_ps(outptr0 + 8, _sum1);

                    _sum2 = _mm256_comp_fmadd_ps(_r03, _k00, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r04, _k01, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r05, _k02, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r13, _k10, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r14, _k11, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r15, _k12, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r23, _k20, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r24, _k21, _sum2);
                    _sum2 = _mm256_comp_fmadd_ps(_r25, _k22, _sum2);

                    __m256 _sum3 = _mm256_loadu_ps(outptr0 + 24);
                    __m256 _r06 = _mm256_broadcast_ss(r0 + 5);
                    __m256 _r16 = _mm256_broadcast_ss(r1 + 5);
                    __m256 _r26 = _mm256_broadcast_ss(r2 + 5);
                    _mm256_storeu_ps(outptr0 + 16, _sum2);

                    _sum3 = _mm256_comp_fmadd_ps(_r04, _k00, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r05, _k01, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r06, _k02, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r14, _k10, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r15, _k11, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r16, _k12, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r24, _k20, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r25, _k21, _sum3);
                    _sum3 = _mm256_comp_fmadd_ps(_r26, _k22, _sum3);

                    _mm256_storeu_ps(outptr0 + 24, _sum3);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    outptr0 += 32;
                }
                for (; j + 1 < outw; j += 2)
                {
                    __m256 _sum0 = _mm256_loadu_ps(outptr0);

                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);

                    _sum0 = _mm256_comp_fmadd_ps(_r01, _k00, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r02, _k01, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r03, _k02, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r11, _k10, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r12, _k11, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r13, _k12, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r21, _k20, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r22, _k21, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r23, _k22, _sum0);

                    __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
                    __m256 _r04 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r14 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r24 = _mm256_broadcast_ss(r2 + 3);
                    _mm256_storeu_ps(outptr0, _sum0);

                    _sum1 = _mm256_comp_fmadd_ps(_r02, _k00, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r03, _k01, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r04, _k02, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r12, _k10, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r13, _k11, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r14, _k12, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r22, _k20, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r23, _k21, _sum1);
                    _sum1 = _mm256_comp_fmadd_ps(_r24, _k22, _sum1);

                    _mm256_storeu_ps(outptr0 + 8, _sum1);

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0 += 16;
                }
                for (; j < outw; j++)
                {
                    __m256 _sum0 = _mm256_loadu_ps(outptr0);

                    __m256 _r01 = _mm256_broadcast_ss(r0);
                    __m256 _r02 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r03 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r11 = _mm256_broadcast_ss(r1);
                    __m256 _r12 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r13 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r21 = _mm256_broadcast_ss(r2);
                    __m256 _r22 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r23 = _mm256_broadcast_ss(r2 + 2);

                    _sum0 = _mm256_comp_fmadd_ps(_r01, _k00, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r02, _k01, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r03, _k02, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r11, _k10, _sum0);
                    _sum0 = _mm256_comp_fmadd_ps(_r12, _k11, _sum0);
                    _sum0 =
_mm256_comp_fmadd_ps(_r13, _k12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r21, _k20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r22, _k21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r23, _k22, _sum0); _mm256_storeu_ps(outptr0, _sum0); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 8; } } } static void conv3x3s2_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); __m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); out1.fill(_bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00_0 = _mm256_loadu_ps(k0); __m256 _k01_0 = _mm256_loadu_ps(k0 + 8); __m256 _k02_0 = _mm256_loadu_ps(k0 + 16); __m256 _k10_0 = _mm256_loadu_ps(k0 + 24); __m256 _k11_0 = _mm256_loadu_ps(k0 + 32); __m256 _k12_0 = _mm256_loadu_ps(k0 + 40); __m256 _k20_0 = _mm256_loadu_ps(k0 + 48); __m256 _k21_0 = _mm256_loadu_ps(k0 + 56); __m256 _k22_0 = _mm256_loadu_ps(k0 + 64); __m256 _k00_1 = _mm256_loadu_ps(k1); __m256 _k01_1 = _mm256_loadu_ps(k1 + 8); __m256 _k02_1 = _mm256_loadu_ps(k1 + 16); __m256 _k10_1 = _mm256_loadu_ps(k1 + 24); __m256 _k11_1 = _mm256_loadu_ps(k1 + 32); __m256 _k12_1 = 
_mm256_loadu_ps(k1 + 40); __m256 _k20_1 = _mm256_loadu_ps(k1 + 48); __m256 _k21_1 = _mm256_loadu_ps(k1 + 56); __m256 _k22_1 = _mm256_loadu_ps(k1 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = 
_mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r03, _k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r15, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _sum12 = _mm256_comp_fmadd_ps(_r05, _k00_1, _sum12); _sum12 = 
_mm256_comp_fmadd_ps(_r06, _k01_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r07, _k02_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r15, _k10_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r16, _k11_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r17, _k12_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r25, _k20_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r26, _k21_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r27, _k22_1, _sum12); _mm256_storeu_ps(outptr0 + 16, _sum02); _mm256_storeu_ps(outptr1 + 16, _sum12); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, _k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _sum13 = _mm256_comp_fmadd_ps(_r07, _k00_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r08, _k01_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r09, _k02_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r17, _k10_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r18, _k11_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r19, _k12_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r27, _k20_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r28, _k21_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r29, _k22_1, _sum13); _mm256_storeu_ps(outptr0 + 24, _sum03); _mm256_storeu_ps(outptr1 + 24, _sum13); __m256 _r010 = _mm256_broadcast_ss(r0 + 9); __m256 _r110 = _mm256_broadcast_ss(r1 + 9); __m256 _r210 = 
_mm256_broadcast_ss(r2 + 9); __m256 _r011 = _mm256_broadcast_ss(r0 + 10); __m256 _r111 = _mm256_broadcast_ss(r1 + 10); __m256 _r211 = _mm256_broadcast_ss(r2 + 10); __m256 _sum04 = _mm256_loadu_ps(outptr0 + 32); __m256 _sum14 = _mm256_loadu_ps(outptr1 + 32); _sum04 = _mm256_comp_fmadd_ps(_r09, _k00_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r010, _k01_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r011, _k02_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r19, _k10_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r110, _k11_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r111, _k12_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r29, _k20_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r210, _k21_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r211, _k22_0, _sum04); _sum14 = _mm256_comp_fmadd_ps(_r09, _k00_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r010, _k01_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r011, _k02_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r19, _k10_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r110, _k11_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r111, _k12_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r29, _k20_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r210, _k21_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r211, _k22_1, _sum14); _mm256_storeu_ps(outptr0 + 32, _sum04); _mm256_storeu_ps(outptr1 + 32, _sum14); __m256 _r012 = _mm256_broadcast_ss(r0 + 11); __m256 _r112 = _mm256_broadcast_ss(r1 + 11); __m256 _r212 = _mm256_broadcast_ss(r2 + 11); __m256 _r013 = _mm256_broadcast_ss(r0 + 12); __m256 _r113 = _mm256_broadcast_ss(r1 + 12); __m256 _r213 = _mm256_broadcast_ss(r2 + 12); __m256 _sum05 = _mm256_loadu_ps(outptr0 + 40); __m256 _sum15 = _mm256_loadu_ps(outptr1 + 40); _sum05 = _mm256_comp_fmadd_ps(_r011, _k00_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r012, _k01_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r013, _k02_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r111, _k10_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r112, _k11_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r113, _k12_0, _sum05); _sum05 
= _mm256_comp_fmadd_ps(_r211, _k20_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r212, _k21_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r213, _k22_0, _sum05); _sum15 = _mm256_comp_fmadd_ps(_r011, _k00_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r012, _k01_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r013, _k02_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r111, _k10_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r112, _k11_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r113, _k12_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r211, _k20_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r212, _k21_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r213, _k22_1, _sum15); _mm256_storeu_ps(outptr0 + 40, _sum05); _mm256_storeu_ps(outptr1 + 40, _sum15); __m256 _r014 = _mm256_broadcast_ss(r0 + 13); __m256 _r114 = _mm256_broadcast_ss(r1 + 13); __m256 _r214 = _mm256_broadcast_ss(r2 + 13); __m256 _r015 = _mm256_broadcast_ss(r0 + 14); __m256 _r115 = _mm256_broadcast_ss(r1 + 14); __m256 _r215 = _mm256_broadcast_ss(r2 + 14); __m256 _sum06 = _mm256_loadu_ps(outptr0 + 48); __m256 _sum16 = _mm256_loadu_ps(outptr1 + 48); _sum06 = _mm256_comp_fmadd_ps(_r013, _k00_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r014, _k01_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r015, _k02_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r113, _k10_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r114, _k11_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r115, _k12_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r213, _k20_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r214, _k21_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r215, _k22_0, _sum06); _sum16 = _mm256_comp_fmadd_ps(_r013, _k00_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r014, _k01_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r015, _k02_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r113, _k10_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r114, _k11_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r115, _k12_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r213, _k20_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r214, _k21_1, 
_sum16); _sum16 = _mm256_comp_fmadd_ps(_r215, _k22_1, _sum16); _mm256_storeu_ps(outptr0 + 48, _sum06); _mm256_storeu_ps(outptr1 + 48, _sum16); __m256 _r016 = _mm256_broadcast_ss(r0 + 15); __m256 _r116 = _mm256_broadcast_ss(r1 + 15); __m256 _r216 = _mm256_broadcast_ss(r2 + 15); __m256 _r017 = _mm256_broadcast_ss(r0 + 16); __m256 _r117 = _mm256_broadcast_ss(r1 + 16); __m256 _r217 = _mm256_broadcast_ss(r2 + 16); __m256 _sum07 = _mm256_loadu_ps(outptr0 + 56); __m256 _sum17 = _mm256_loadu_ps(outptr1 + 56); _sum07 = _mm256_comp_fmadd_ps(_r015, _k00_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r016, _k01_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r017, _k02_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r115, _k10_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r116, _k11_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r117, _k12_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r215, _k20_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r216, _k21_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r217, _k22_0, _sum07); _sum17 = _mm256_comp_fmadd_ps(_r015, _k00_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r016, _k01_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r017, _k02_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r115, _k10_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r116, _k11_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r117, _k12_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r215, _k20_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r216, _k21_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r217, _k22_1, _sum17); _mm256_storeu_ps(outptr0 + 56, _sum07); _mm256_storeu_ps(outptr1 + 56, _sum17); r0 += 16; r1 += 16; r2 += 16; outptr0 += 64; outptr1 += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = 
_mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r03, 
_k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r15, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _sum12 = _mm256_comp_fmadd_ps(_r05, _k00_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r06, _k01_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r07, _k02_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r15, _k10_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r16, _k11_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r17, _k12_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r25, _k20_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r26, _k21_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r27, _k22_1, _sum12); _mm256_storeu_ps(outptr0 + 16, _sum02); _mm256_storeu_ps(outptr1 + 16, _sum12); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 
_r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, _k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _sum13 = _mm256_comp_fmadd_ps(_r07, _k00_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r08, _k01_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r09, _k02_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r17, _k10_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r18, _k11_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r19, _k12_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r27, _k20_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r28, _k21_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r29, _k22_1, _sum13); _mm256_storeu_ps(outptr0 + 24, _sum03); _mm256_storeu_ps(outptr1 + 24, _sum13); r0 += 8; r1 += 8; r2 += 8; outptr0 += 32; outptr1 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, 
_sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r03, _k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum11); _sum11 = 
_mm256_comp_fmadd_ps(_r15, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); r0 += 4; r1 += 4; r2 += 4; outptr0 += 16; outptr1 += 16; } for (; j < outw; j++) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; outptr1 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 8; k1 += 9 * 8; } } #pragma omp 
parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00_0 = _mm256_loadu_ps(k0); __m256 _k01_0 = _mm256_loadu_ps(k0 + 8); __m256 _k02_0 = _mm256_loadu_ps(k0 + 16); __m256 _k10_0 = _mm256_loadu_ps(k0 + 24); __m256 _k11_0 = _mm256_loadu_ps(k0 + 32); __m256 _k12_0 = _mm256_loadu_ps(k0 + 40); __m256 _k20_0 = _mm256_loadu_ps(k0 + 48); __m256 _k21_0 = _mm256_loadu_ps(k0 + 56); __m256 _k22_0 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = 
_mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _mm256_storeu_ps(outptr0 + 8, _sum01); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _mm256_storeu_ps(outptr0 + 16, _sum02); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, 
_k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _mm256_storeu_ps(outptr0 + 24, _sum03); __m256 _r010 = _mm256_broadcast_ss(r0 + 9); __m256 _r110 = _mm256_broadcast_ss(r1 + 9); __m256 _r210 = _mm256_broadcast_ss(r2 + 9); __m256 _r011 = _mm256_broadcast_ss(r0 + 10); __m256 _r111 = _mm256_broadcast_ss(r1 + 10); __m256 _r211 = _mm256_broadcast_ss(r2 + 10); __m256 _sum04 = _mm256_loadu_ps(outptr0 + 32); _sum04 = _mm256_comp_fmadd_ps(_r09, _k00_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r010, _k01_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r011, _k02_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r19, _k10_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r110, _k11_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r111, _k12_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r29, _k20_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r210, _k21_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r211, _k22_0, _sum04); _mm256_storeu_ps(outptr0 + 32, _sum04); __m256 _r012 = _mm256_broadcast_ss(r0 + 11); __m256 _r112 = _mm256_broadcast_ss(r1 + 11); __m256 _r212 = _mm256_broadcast_ss(r2 + 11); __m256 _r013 = _mm256_broadcast_ss(r0 + 12); __m256 _r113 = _mm256_broadcast_ss(r1 + 12); __m256 _r213 = _mm256_broadcast_ss(r2 + 12); __m256 _sum05 = _mm256_loadu_ps(outptr0 + 40); _sum05 = _mm256_comp_fmadd_ps(_r011, _k00_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r012, _k01_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r013, _k02_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r111, _k10_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r112, _k11_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r113, _k12_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r211, _k20_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r212, _k21_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r213, _k22_0, _sum05); _mm256_storeu_ps(outptr0 + 40, 
_sum05); __m256 _r014 = _mm256_broadcast_ss(r0 + 13); __m256 _r114 = _mm256_broadcast_ss(r1 + 13); __m256 _r214 = _mm256_broadcast_ss(r2 + 13); __m256 _r015 = _mm256_broadcast_ss(r0 + 14); __m256 _r115 = _mm256_broadcast_ss(r1 + 14); __m256 _r215 = _mm256_broadcast_ss(r2 + 14); __m256 _sum06 = _mm256_loadu_ps(outptr0 + 48); _sum06 = _mm256_comp_fmadd_ps(_r013, _k00_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r014, _k01_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r015, _k02_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r113, _k10_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r114, _k11_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r115, _k12_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r213, _k20_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r214, _k21_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r215, _k22_0, _sum06); _mm256_storeu_ps(outptr0 + 48, _sum06); __m256 _r016 = _mm256_broadcast_ss(r0 + 15); __m256 _r116 = _mm256_broadcast_ss(r1 + 15); __m256 _r216 = _mm256_broadcast_ss(r2 + 15); __m256 _r017 = _mm256_broadcast_ss(r0 + 16); __m256 _r117 = _mm256_broadcast_ss(r1 + 16); __m256 _r217 = _mm256_broadcast_ss(r2 + 16); __m256 _sum07 = _mm256_loadu_ps(outptr0 + 56); _sum07 = _mm256_comp_fmadd_ps(_r015, _k00_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r016, _k01_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r017, _k02_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r115, _k10_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r116, _k11_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r117, _k12_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r215, _k20_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r216, _k21_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r217, _k22_0, _sum07); _mm256_storeu_ps(outptr0 + 56, _sum07); r0 += 16; r1 += 16; r2 += 16; outptr0 += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = 
_mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _mm256_storeu_ps(outptr0 + 8, _sum01); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 
= _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _mm256_storeu_ps(outptr0 + 16, _sum02); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, _k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _mm256_storeu_ps(outptr0 + 24, _sum03); r0 += 8; r1 += 8; r2 += 8; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = 
_mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _mm256_storeu_ps(outptr0 + 8, _sum01); r0 += 4; r1 += 4; r2 += 4; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, 
_sum00); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 8; } } }
/* yeefdtd.kernel_runtime.c */
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include "local_header.h" #include "openmp_pscmc_inc.h" #include "yeefdtd.kernel_inc.h" int openmp_kgm_eqn_core_init (openmp_pscmc_env * pe ,openmp_kgm_eqn_core_struct * kerstr ){ return 0 ;} void openmp_kgm_eqn_core_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_kgm_eqn_core_struct )); } int openmp_kgm_eqn_core_get_num_compute_units (openmp_kgm_eqn_core_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_kgm_eqn_core_get_xlen (){ return IDX_OPT_MAX ;} int openmp_kgm_eqn_core_exec (openmp_kgm_eqn_core_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_kgm_eqn_core_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->extA0 , ( kerstr )->extA1 , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->M)[0] , ( ( kerstr )->Q)[0] , ( ( kerstr )->DX)[0] , ( ( kerstr )->GEXT)[0] , ( ( kerstr )->rfz0)[0] , ( ( kerstr )->g_beg)[0] , ( ( kerstr )->swap_input)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_kgm_eqn_core_scmc_set_parameter_outEB (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_inEB (openmp_kgm_eqn_core_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_extA0 (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->extA0 = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_extA1 (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->extA1 = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_xoffset (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_yoffset (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_zoffset (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_y_cpu_core (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_numvec (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_XLEN (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_YLEN (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_ZLEN (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_ovlp (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_xblock (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_yblock (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr 
)->yblock = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_zblock (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_num_ele (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_DT (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_M (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->M = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_Q (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Q = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_DX (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DX = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_GEXT (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->GEXT = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_rfz0 (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->rfz0 = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_g_beg (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->g_beg = pm->d_data); } int openmp_kgm_eqn_core_scmc_set_parameter_swap_input (openmp_kgm_eqn_core_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->swap_input = pm->d_data); } int openmp_kgm_calc_rho_init (openmp_pscmc_env * pe ,openmp_kgm_calc_rho_struct * kerstr ){ return 0 ;} void openmp_kgm_calc_rho_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_kgm_calc_rho_struct )); } int openmp_kgm_calc_rho_get_num_compute_units (openmp_kgm_calc_rho_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_kgm_calc_rho_get_xlen (){ return IDX_OPT_MAX ;} int openmp_kgm_calc_rho_exec (openmp_kgm_calc_rho_struct * kerstr ,long scmc_internal_g_xlen ,long 
scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_kgm_calc_rho_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->M)[0] , ( ( kerstr )->Q)[0] , ( ( kerstr )->DX)[0] , ( ( kerstr )->refz0)[0] , ( ( kerstr )->q)[0] , ( ( kerstr )->dtodx)[0] , ( ( kerstr )->mode)[0] , ( ( kerstr )->swap_input)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_kgm_calc_rho_scmc_set_parameter_outEB (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_inEB (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_xoffset (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_yoffset (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_zoffset (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_y_cpu_core (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = 
pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_numvec (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_XLEN (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_YLEN (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_ZLEN (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_ovlp (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_xblock (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_yblock (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_zblock (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_num_ele (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_DT (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_M (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->M = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_Q (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Q = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_DX (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DX = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_refz0 
(openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->refz0 = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_q (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->q = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_dtodx (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->dtodx = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_mode (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->mode = pm->d_data); } int openmp_kgm_calc_rho_scmc_set_parameter_swap_input (openmp_kgm_calc_rho_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->swap_input = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_init (openmp_pscmc_env * pe ,openmp_PML_FDTD_CURL_BWD_struct * kerstr ){ return 0 ;} void openmp_PML_FDTD_CURL_BWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_PML_FDTD_CURL_BWD_struct )); } int openmp_PML_FDTD_CURL_BWD_get_num_compute_units (openmp_PML_FDTD_CURL_BWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_PML_FDTD_CURL_BWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_PML_FDTD_CURL_BWD_exec (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_PML_FDTD_CURL_BWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->outPMLEB , ( kerstr )->inPMLEB , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr 
)->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->M)[0] , ( ( kerstr )->Q)[0] , ( ( kerstr )->DX)[0] , ( ( kerstr )->DY)[0] , ( ( kerstr )->DZ)[0] , ( ( kerstr )->abc_dir)[0] , ( ( kerstr )->level)[0] , ( ( kerstr )->pml_m)[0] , ( ( kerstr )->max_sigma)[0] , ( ( kerstr )->allxmax)[0] , ( ( kerstr )->allymax)[0] , ( ( kerstr )->allzmax)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_outEB (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_inEB (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_outPMLEB (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outPMLEB = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_inPMLEB (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inPMLEB = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_xoffset (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_yoffset (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_zoffset (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_y_cpu_core (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_numvec (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int 
openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_XLEN (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_YLEN (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_ZLEN (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_ovlp (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_xblock (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_yblock (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_zblock (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_num_ele (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_DT (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_M (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->M = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_Q (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Q = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_DX (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DX = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_DY (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( 
( kerstr )->DY = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_DZ (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DZ = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_abc_dir (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->abc_dir = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_level (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->level = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_pml_m (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->pml_m = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_max_sigma (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->max_sigma = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_allxmax (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->allxmax = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_allymax (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->allymax = pm->d_data); } int openmp_PML_FDTD_CURL_BWD_scmc_set_parameter_allzmax (openmp_PML_FDTD_CURL_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->allzmax = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_init (openmp_pscmc_env * pe ,openmp_PML_FDTD_CURL_FWD_struct * kerstr ){ return 0 ;} void openmp_PML_FDTD_CURL_FWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_PML_FDTD_CURL_FWD_struct )); } int openmp_PML_FDTD_CURL_FWD_get_num_compute_units (openmp_PML_FDTD_CURL_FWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_PML_FDTD_CURL_FWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_PML_FDTD_CURL_FWD_exec (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; 
int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_PML_FDTD_CURL_FWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( kerstr )->outPMLEB , ( kerstr )->inPMLEB , ( kerstr )->xoffset , ( kerstr )->yoffset , ( kerstr )->zoffset , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , ( ( kerstr )->M)[0] , ( ( kerstr )->Q)[0] , ( ( kerstr )->DX)[0] , ( ( kerstr )->DY)[0] , ( ( kerstr )->DZ)[0] , ( ( kerstr )->abc_dir)[0] , ( ( kerstr )->level)[0] , ( ( kerstr )->pml_m)[0] , ( ( kerstr )->max_sigma)[0] , ( ( kerstr )->allxmax)[0] , ( ( kerstr )->allymax)[0] , ( ( kerstr )->allzmax)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_outEB (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_inEB (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_outPMLEB (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outPMLEB = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_inPMLEB (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inPMLEB = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_xoffset (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xoffset = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_yoffset 
(openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yoffset = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_zoffset (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zoffset = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_y_cpu_core (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_numvec (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_XLEN (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_YLEN (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_ZLEN (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_ovlp (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_xblock (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_yblock (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_zblock (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_num_ele (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_DT (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( 
kerstr )->DT = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_M (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->M = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_Q (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->Q = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_DX (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DX = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_DY (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DY = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_DZ (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DZ = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_abc_dir (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->abc_dir = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_level (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->level = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_pml_m (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->pml_m = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_max_sigma (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->max_sigma = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_allxmax (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->allxmax = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_allymax (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->allymax = pm->d_data); } int openmp_PML_FDTD_CURL_FWD_scmc_set_parameter_allzmax (openmp_PML_FDTD_CURL_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->allzmax = pm->d_data); } int openmp_merge_current_init (openmp_pscmc_env * pe 
,openmp_merge_current_struct * kerstr ){ return 0 ;} void openmp_merge_current_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_merge_current_struct )); } int openmp_merge_current_get_num_compute_units (openmp_merge_current_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_merge_current_get_xlen (){ return IDX_OPT_MAX ;} int openmp_merge_current_exec (openmp_merge_current_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_merge_current_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_merge_current_scmc_set_parameter_outEB (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_merge_current_scmc_set_parameter_inEB (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_merge_current_scmc_set_parameter_y_cpu_core (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_merge_current_scmc_set_parameter_numvec (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_merge_current_scmc_set_parameter_XLEN (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr 
)->XLEN = pm->d_data); } int openmp_merge_current_scmc_set_parameter_YLEN (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_merge_current_scmc_set_parameter_ZLEN (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_merge_current_scmc_set_parameter_ovlp (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_merge_current_scmc_set_parameter_xblock (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_merge_current_scmc_set_parameter_yblock (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_merge_current_scmc_set_parameter_zblock (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_merge_current_scmc_set_parameter_num_ele (openmp_merge_current_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_merge_current_2_init (openmp_pscmc_env * pe ,openmp_merge_current_2_struct * kerstr ){ return 0 ;} void openmp_merge_current_2_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_merge_current_2_struct )); } int openmp_merge_current_2_get_num_compute_units (openmp_merge_current_2_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_merge_current_2_get_xlen (){ return IDX_OPT_MAX ;} int openmp_merge_current_2_exec (openmp_merge_current_2_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < 
scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_merge_current_2_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_merge_current_2_scmc_set_parameter_outEB (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_inEB (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_y_cpu_core (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_numvec (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_XLEN (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_YLEN (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_ZLEN (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_ovlp (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_xblock (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_yblock (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int 
openmp_merge_current_2_scmc_set_parameter_zblock (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_merge_current_2_scmc_set_parameter_num_ele (openmp_merge_current_2_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Div_FWD_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Div_FWD_4th_struct )); } int openmp_Yee_FDTD_Div_FWD_4th_get_num_compute_units (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Div_FWD_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Div_FWD_4th_exec (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Div_FWD_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_outEB (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Div_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Div_FWD_struct * kerstr ){ return 0 ;} 
void openmp_Yee_FDTD_Div_FWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Div_FWD_struct )); } int openmp_Yee_FDTD_Div_FWD_get_num_compute_units (openmp_Yee_FDTD_Div_FWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Div_FWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Div_FWD_exec (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Div_FWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_outEB (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_inEB (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_numvec (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Div_FWD_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_xblock (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_yblock (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_zblock (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Div_FWD_scmc_set_parameter_DT (openmp_Yee_FDTD_Div_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Div_BWD_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Div_BWD_4th_struct )); } int openmp_Yee_FDTD_Div_BWD_4th_get_num_compute_units (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Div_BWD_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Div_BWD_4th_exec (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = 
omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Div_BWD_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_outEB (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Div_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Div_BWD_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Div_BWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Div_BWD_struct )); } int openmp_Yee_FDTD_Div_BWD_get_num_compute_units (openmp_Yee_FDTD_Div_BWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Div_BWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Div_BWD_exec (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Div_BWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr 
)->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_outEB (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_inEB (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_numvec (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_xblock (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_yblock (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_zblock (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); 
} int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Div_BWD_scmc_set_parameter_DT (openmp_Yee_FDTD_Div_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_FWD_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_FWD_4th_struct )); } int openmp_Yee_FDTD_Curl_FWD_4th_get_num_compute_units (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Curl_FWD_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_FWD_4th_exec (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Curl_FWD_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_FWD_4th_struct * 
kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_init (openmp_pscmc_env * pe 
,openmp_Yee_FDTD_Curl_FWD_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_FWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_FWD_struct )); } int openmp_Yee_FDTD_Curl_FWD_get_num_compute_units (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Curl_FWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_FWD_exec (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Curl_FWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int 
openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_FWD_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_BWD_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_BWD_4th_struct )); } int openmp_Yee_FDTD_Curl_BWD_4th_get_num_compute_units (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Curl_BWD_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_BWD_4th_exec (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,long scmc_internal_g_xlen ,long 
scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Curl_BWD_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr 
)->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Curl_BWD_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_BWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_BWD_struct )); } int openmp_Yee_FDTD_Curl_BWD_get_num_compute_units (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Curl_BWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_BWD_exec (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { 
openmp_Yee_FDTD_Curl_BWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = 
pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_BWD_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Grad_FWD_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Grad_FWD_4th_struct )); } int openmp_Yee_FDTD_Grad_FWD_4th_get_num_compute_units (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Grad_FWD_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Grad_FWD_4th_exec (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Grad_FWD_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_outEB 
(openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int 
openmp_Yee_FDTD_Grad_FWD_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Grad_FWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Grad_FWD_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Grad_FWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Grad_FWD_struct )); } int openmp_Yee_FDTD_Grad_FWD_get_num_compute_units (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Grad_FWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Grad_FWD_exec (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Grad_FWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_outEB (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_inEB (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr 
)->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_numvec (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_xblock (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_yblock (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_zblock (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Grad_FWD_scmc_set_parameter_DT (openmp_Yee_FDTD_Grad_FWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Grad_BWD_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Grad_BWD_4th_struct )); } int openmp_Yee_FDTD_Grad_BWD_4th_get_num_compute_units (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ){ return omp_get_max_threads 
( ) ;} int openmp_Yee_FDTD_Grad_BWD_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Grad_BWD_4th_exec (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Grad_BWD_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_outEB (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Grad_BWD_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Grad_BWD_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Grad_BWD_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Grad_BWD_struct )); } int openmp_Yee_FDTD_Grad_BWD_get_num_compute_units (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Grad_BWD_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Grad_BWD_exec (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle 
) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Grad_BWD_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_outEB (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_inEB (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_numvec (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_xblock (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * 
pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_yblock (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_zblock (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Grad_BWD_scmc_set_parameter_DT (openmp_Yee_FDTD_Grad_BWD_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_B_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_B_4th_struct )); } int openmp_Yee_FDTD_Curl_B_4th_get_num_compute_units (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Curl_B_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_B_4th_exec (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Curl_B_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr 
)->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_B_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_B_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_B_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Curl_B_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_B_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_B_struct )); } int openmp_Yee_FDTD_Curl_B_get_num_compute_units (openmp_Yee_FDTD_Curl_B_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Curl_B_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_B_exec (openmp_Yee_FDTD_Curl_B_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Curl_B_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_B_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_B_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_B_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_E_4th_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_E_4th_struct )); } int openmp_Yee_FDTD_Curl_E_4th_get_num_compute_units (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ){ return omp_get_max_threads ( ) ;} int 
openmp_Yee_FDTD_Curl_E_4th_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_E_4th_exec (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Curl_E_4th_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); 
} int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_E_4th_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_E_4th_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); } int openmp_Yee_FDTD_Curl_E_init (openmp_pscmc_env * pe ,openmp_Yee_FDTD_Curl_E_struct * kerstr ){ return 0 ;} void openmp_Yee_FDTD_Curl_E_get_struct_len (size_t * len ){ ((len)[0] = sizeof(openmp_Yee_FDTD_Curl_E_struct )); } int openmp_Yee_FDTD_Curl_E_get_num_compute_units (openmp_Yee_FDTD_Curl_E_struct * kerstr ){ return omp_get_max_threads ( ) ;} int openmp_Yee_FDTD_Curl_E_get_xlen (){ return IDX_OPT_MAX ;} int openmp_Yee_FDTD_Curl_E_exec (openmp_Yee_FDTD_Curl_E_struct * kerstr ,long scmc_internal_g_xlen ,long scmc_internal_g_ylen ){ #pragma omp parallel { int xid ; int yid ; int numt = omp_get_num_threads ( ) ; int tid = omp_get_thread_num ( ) ; int ysingle = ( ( scmc_internal_g_ylen + ( numt - 1 ) ) / numt ) ; int ymin = ( tid * ysingle ) ; int ymax = ( ( 1 + tid ) * ysingle ) ; for ((yid = tid) ; ( yid < scmc_internal_g_ylen ) ; (yid = ( 
yid + numt ))) { for ((xid = 0) ; ( xid < scmc_internal_g_xlen ) ; (xid = ( xid + 1 ))) { openmp_Yee_FDTD_Curl_E_scmc_kernel ( ( kerstr )->outEB , ( kerstr )->inEB , ( ( kerstr )->y_cpu_core)[0] , ( ( kerstr )->numvec)[0] , ( ( kerstr )->XLEN)[0] , ( ( kerstr )->YLEN)[0] , ( ( kerstr )->ZLEN)[0] , ( ( kerstr )->ovlp)[0] , ( ( kerstr )->xblock)[0] , ( ( kerstr )->yblock)[0] , ( ( kerstr )->zblock)[0] , ( ( kerstr )->num_ele)[0] , ( ( kerstr )->DT)[0] , yid , scmc_internal_g_ylen ); }}} return 0 ;} int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_outEB (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->outEB = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_inEB (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->inEB = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_y_cpu_core (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->y_cpu_core = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_numvec (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->numvec = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_XLEN (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->XLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_YLEN (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->YLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_ZLEN (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ZLEN = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_ovlp (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->ovlp = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_xblock (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->xblock = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_yblock (openmp_Yee_FDTD_Curl_E_struct * kerstr 
,openmp_pscmc_mem * pm ){ ( ( kerstr )->yblock = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_zblock (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->zblock = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_num_ele (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->num_ele = pm->d_data); } int openmp_Yee_FDTD_Curl_E_scmc_set_parameter_DT (openmp_Yee_FDTD_Curl_E_struct * kerstr ,openmp_pscmc_mem * pm ){ ( ( kerstr )->DT = pm->d_data); }
test83.c
/* Compiler test: arrays of function pointers, taking a function's address,
 * an indirect call whose arguments are themselves expressions/calls, and
 * an empty OpenMP parallel region. */
struct { int a;} b;                     /* file-scope object of an unnamed struct type */
int foo(int a, int b) { return a; }     /* returns its first argument */
int bar(int a, int b) { return b; }     /* returns its second argument */
void pr(char * str) {}                  /* no-op; parameter unused */
int main() {
  int y = 10;
  int i[4];
  int a = 10;
  int (*fptr[4])(int, int);             /* array of pointers to int(int,int) */
  int p[4];
  p[3] = 0;
  fptr[3] = &foo;                       /* &foo and plain foo are equivalent here */
  pr("Below");
  /* indirect call through fptr[3] (== foo), so i[3] becomes a * 10 == 100 */
  i[3] = fptr[3](a * 10, bar(2, p[3]));
  #pragma omp parallel
  {
  }
}
Wparentheses-3.c
/* PR c/70436 */
/* { dg-additional-options "-Wparentheses -fno-openmp" } */

/* Checks that -Wparentheses still diagnoses the ambiguous dangling-else
 * when an OpenMP pragma (ignored under -fno-openmp) sits between the
 * outer `if` and the inner `if ... else`.  Cases whose inner body is
 * braced must NOT warn.  The dg-warning markers stay on the line of the
 * `if` that is expected to trigger the diagnostic. */

int a, b, c;
void bar (void);
void baz (void);

void
f1 (void)
{
  int i, j;
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp for
    for (i = 0; i < 10; i++)
      if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    while (1)
      #pragma omp for
      for (i = 0; i < 10; i++)
        if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    for (i = 0; i < 10; i++)
      #pragma omp for
      for (j = 0; j < 10; j++)
        if (b) bar (); else baz ();
  if (a)
    #pragma omp for
    for (i = 0; i < 10; i++)
      if (b) /* { dg-warning "ambiguous" } */
        #pragma omp parallel for
        for (j = 0; j < 10; j++)
          if (c) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp taskloop
    for (i = 0; i < 10; i++)
      if (b)
        #pragma omp parallel for
        for (j = 0; j < 10; j++)
          if (c) bar (); else baz ();
      else
        bar ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp taskloop simd
    for (i = 0; i < 10; i++)
      if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp for collapse(2)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp critical
    if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    for (i = 0; i < 10; i++)
      #pragma omp simd
      for (j = 0; j < 10; j++)
        if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp for simd schedule(runtime)
    for (i = 0; i < 10; i++)
      if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp master
    if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp parallel
    if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    for (i = 0; i < 10; i++)
      #pragma omp parallel for
      for (j = 0; j < 10; j++)
        if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    for (i = 0; i < 10; i++)
      #pragma omp parallel for simd
      for (j = 0; j < 10; j++)
        if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp single
    if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp task
    if (b) bar (); else baz ();
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp taskgroup
    if (b) bar (); else baz ();
  /* braced inner bodies: no ambiguity, no warning expected below */
  if (a)
    #pragma omp for
    for (i = 0; i < 10; i++)
      { if (b) bar (); else baz (); }
  if (a)
    {
      #pragma omp taskloop
      for (i = 0; i < 10; ++i)
        if (b) bar ();
    }
  else
    baz ();
  if (a)
    #pragma omp for collapse(2)
    for (i = 0; i < 10; i++)
      { for (j = 0; j < 10; j++) if (b) bar (); else baz (); }
  if (a)
    #pragma omp critical
    { if (b) bar (); else baz (); }
  if (a)
    for (i = 0; i < 10; i++)
      #pragma omp simd
      for (j = 0; j < 10; j++)
        { if (b) bar (); }
  else
    baz ();
  if (a)
    #pragma omp for simd schedule(dynamic, 5)
    for (i = 0; i < 10; i++)
      { if (b) bar (); else baz (); }
  if (a)
    #pragma omp master
    { if (b) bar (); else baz (); }
  if (a)
    #pragma omp parallel
    { if (b) bar (); else baz (); }
  if (a)
    {
      #pragma omp parallel
      if (b) bar (); else baz ();
    }
  if (a)
    for (i = 0; i < 10; i++)
      #pragma omp parallel for
      for (j = 0; j < 10; j++)
        { if (b) bar (); }
  else
    baz ();
  if (a)
    for (i = 0; i < 10; i++)
      #pragma omp parallel for simd
      for (j = 0; j < 10; j++)
        { if (b) bar (); }
  else
    baz ();
  if (a)
    #pragma omp single
    { if (b) bar (); }
  else
    baz ();
  if (a)
    #pragma omp task
    { if (b) bar (); }
  else
    baz ();
  if (a)
    #pragma omp taskgroup
    { if (b) bar (); else baz (); }
  if (a)
    #pragma omp taskloop simd
    for (i = 0; i < 10; i++)
      { if (b) bar (); else baz (); }
}

void
f2 (int d, int e, int f)
{
  if (a) /* { dg-warning "ambiguous" } */
    #pragma omp ordered
    if (b) bar (); else baz ();
  if (d) /* { dg-warning "ambiguous" } */
    #pragma omp ordered threads
    if (b) bar (); else baz ();
  if (e)
    #pragma omp ordered
    { if (b) bar (); else baz (); }
  if (f)
    #pragma omp ordered threads
    { if (b) bar (); else baz (); }
}
omp_for_nowait.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int test_omp_for_nowait() { int result; int count; int j; int myarray[LOOPCOUNT]; result = 0; count = 0; #pragma omp parallel { int rank; int i; rank = omp_get_thread_num(); #pragma omp for nowait for (i = 0; i < LOOPCOUNT; i++) { if (i == 0) { my_sleep(SLEEPTIME); count = 1; #pragma omp flush(count) } } #pragma omp for for (i = 0; i < LOOPCOUNT; i++) { #pragma omp flush(count) if (count == 0) result = 1; } } return result; } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_nowait()) { num_failed++; } } return num_failed; }
pi4.c
/*
 * This code calculates pi using the formula to calculate
 * the atan(z) which is the integral from 0 to z of 1/(1+x*x)
 * times dx.  atan(1) is 45 degrees or pi/4.
 *
 * Fixes: added <stdio.h> (printf was called without a prototype, which
 * is invalid in C99 and later) and changed the non-standard `void main`
 * to the hosted-environment form `int main (void)`.
 */
#include <stdio.h>
#include <omp.h>

static long num_steps = 100000; /* number of intervals */
double step;                    /* the size of the interval - dx */

#define NUM_THREADS 2

int main (void)
{
   int i;           /* loop control variable (private per thread) */
   double x;        /* current x position for function evaluation */
   double pi = 0.0; /* final result (shared) */
   double sum;      /* per-thread sum of partial results */

   step = 1.0 / (double) num_steps;

   /*
    * This may be done more flexibly by using the OMP_NUM_THREADS
    * environment variable instead.
    */
   omp_set_num_threads (NUM_THREADS);

   /*
    * Each thread executes the code in the pragma below.
    * i, x and sum must be private: every thread walks its own strided
    * subset of iterations i = id, id + NUM_THREADS, ...
    */
   #pragma omp parallel private (i, x, sum)
   {
      int id = omp_get_thread_num (); /* the identity of the thread */

      /* Calculate this thread's share of the integral (midpoint rule). */
      for (i = id, sum = 0.0; i < num_steps; i = i + NUM_THREADS)
      {
         x = (i + 0.5) * step;
         sum += 4.0 / (1.0 + x * x);
      }

      /* pi is shared between threads, so the accumulation (times dx)
       * must be mutually exclusive. */
      #pragma omp critical
      pi += sum * step;
   } /* end #pragma omp parallel */

   printf ("The computed value of pi is %f\n", pi);
   return 0;
}
GB_unaryop__ainv_uint64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint64_uint32
// op(A') function:  GB_tran__ainv_uint64_uint32

// C type:   uint64_t
// A type:   uint32_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: AINV (additive inverse).  Negating an unsigned value is
// well-defined in C: it wraps modulo 2^64 in the uint64_t result.
#define GB_OP(z, x) \
    z = -x ;

// casting (applied to aij before the operator)
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint64_uint32
(
    uint64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the transpose template does the actual work
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_uint32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint32_int16
// op(A') function:  GB_tran__lnot_uint32_int16

// C type:   uint32_t
// A type:   int16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: LNOT (logical not), producing 0 or 1
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: note that aij is typecast to uint32_t first, and the
// logical-not is then applied to the cast value
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint32_int16
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the transpose template does the actual work
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_fc64_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc64_uint16
// op(A') function:  GB_unop_tran__identity_fc64_uint16

// C type:   GxB_FC64_t
// A type:   uint16_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: IDENTITY (the cast to complex does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting: uint16 value becomes the real part of a double complex
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                                 \
{                                                         \
    /* aij = Ax [pA] */                                   \
    uint16_t aij = Ax [pA] ;                              \
    /* Cx [pC] = op (cast (aij)) */                       \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;        \
    Cx [pC] = z ;                                         \
}

// true if operator is the identity op with no typecasting
// (false here: uint16 -> double complex requires a real cast)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fc64_uint16
(
    GxB_FC64_t *Cx,                 // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries not present in the bitmap
            uint16_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fc64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
markstm.c
//** 2 functions called from i2.c **//

/* Move markers by using simple Runge-Kutta method */
/* Advects every marker through the current velocity field for one timestep
   (markmod==1: single Euler step; otherwise classical 4th-order Runge-Kutta),
   updates marker stresses for rotation (Turcotte&Schubert or Jaumann),
   handles fluid/water markers, and re-activates "immobile" source markers
   (type >= 100) that drift into the grid by appending a copy at marknum1.
   Operates entirely on the global marker arrays (markx, marky, markt, ...). */
void movemarkomp()
{
/* Vx, Vy buffer */
/* NOTE(review): dvxdx..dvydy, m30, xnonstab, ynonstab appear unused in this
   routine — presumably leftovers; confirm before removing. */
double dvxdx,dvxdy,dvydx,dvydy,celdx,celdy,vx0,vx1,vx2,vx3,vx4,vy0,vy1,vy2,vy3,vy4,ee0,ee1,ee2,ee3,ee4,sp0,sp1,sp2,sp3,sp4,pr0,pr1,pr2,pr3,pr4;
/* Water */
double vxwater,vywater;
long int mm1,marknum1,m10,m20,m30,m1,m2,m3;
/* Erosion-Sedimentation Y/N */
int n1;
int mm2;
/* Nonstabilyty for immobile markers */
double xnonstab=0.50,ynonstab=0.60;
double dpdx,dpdy,e,n,vxkoef,vykoef,dx,dy;
double start;

/* Hydration front progress */
#if setup>9
start=omp_get_wtime();
/* dehydration */
if(vyfluid!=0 && timesum>1e+11) hydration2omp();
fprintf(fp_log,"\n Time taken for hydration = %e s \n",omp_get_wtime()-start);
#endif

/* Save number of markers */
marknum1=marknum;

/* Surface changes */
#if setup>9
start=omp_get_wtime();
if (timestep && erosmod) erosion();
fprintf(fp_log,"\n Time taken for erosion = %e s \n",omp_get_wtime()-start);
#endif

/* Move markers */
/* NOTE(review): the private() list names a,b which are not declared locally —
   presumably file-scope globals made thread-private here; confirm. */
#pragma omp parallel for shared(markx,marky,markt,markim,markk,markxx,markxy,markd,markv,markp,markexx,markexy,markw,marke,marknum1,follow,nm,ystpy,m10_hr,m11_hr,marknum,outgrid,eroslev,vyfluid,vymelt,GXKOEF,GYKOEF,markmod,timestep,zdeep,tdeep,markht,xsize,ysize,xnumx,ynumy,gx,gy,pr,esp,exx,exy,vx,vy,markcp,markkt,markkf,markkp,markro,markbb,markaa,marknu,markn0,markn1,markll,marka0,marka1,markb0,markb1,markdh,markdv,markss,markmm,marks1,start_cond,stoksmod) \
private(mm1,mm2,m10,m20,m1,m2,m3,n1,vxwater,vywater,e,n,dpdx,dpdy,a,b,vxkoef,vykoef,vx0,vy0,sp0,ee0,pr0,vx1,vy1,sp1,ee1,pr1,vx2,vy2,sp2,ee2,pr2,vx3,vy3,sp3,ee3,pr3,vx4,vy4,sp4,ee4,pr4,dx,dy) \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1++)
{
/* Marker type */
mm2=(int)markt[mm1];
if (mm2>=100) mm2-=100;
/* Process markers inside the grid (or all markers when outgrid!=1),
   skipping the ones flagged immobile by type */
if( ((markx[mm1]>=0 && marky[mm1]>=0 && (markx[mm1])<=xsize && (marky[mm1])<=ysize) || outgrid!=1) && !markim[mm2] )
{
// Search marker location within nodal grid
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
/**/
/* Erosion-Sedimentation */
if((marky[mm1])<=eroslev) n1=1; else n1=0;

/* Water marker move: fluid markers (types 50..99) get an extra
   buoyancy-driven velocity on top of the interpolated rock velocity */
vxwater=vywater=0;
if(markt[mm1]>=50 && markt[mm1]<100)
{
/* Water velocity */
vywater=vyfluid;
if(markd[mm1]>1100.0) vywater=vymelt;
/* Fluid in rock */
if(vyfluid>0 && (markk[mm1]==0 || markk[mm1]>298.0))
{
/* Horizontal,Vertical P-cell index (clamped to interior cells) */
m1=m10; if(markx[mm1]>(gx[m1]+gx[m1+1])/2.0) m1+=1; if(m1<1) m1=1; if(m1>xnumx-2) m1=xnumx-2;
m2=m20; if(marky[mm1]>(gy[m2]+gy[m2+1])/2.0) m2+=1; if(m2<1) m2=1; if(m2>ynumy-2) m2=ynumy-2;
/* Pressure gradients (bilinear from the 4 surrounding P nodes) */
e=(markx[mm1]-(gx[m1-1]+gx[m1])/2.0)/((gx[m1+1]-gx[m1-1])/2.0);
n=(marky[mm1]-(gy[m2-1]+gy[m2])/2.0)/((gy[m2+1]-gy[m2-1])/2.0);
m3=m1*ynumy+m2;
dpdx=2.0*((1.0-n)*(pr[m3+ynumy]-pr[m3])+n*(pr[m3+ynumy+1]-pr[m3+1]))/(gx[m1+1]-gx[m1-1]);
dpdy=2.0*((1.0-e)*(pr[m3+1]-pr[m3])+e*(pr[m3+ynumy+1]-pr[m3+ynumy]))/(gy[m2+1]-gy[m2-1]);
/* Recalc velocity koefficients (clipped to [-2,2]) */
vxkoef=(1000.0*GXKOEF-dpdx)/(2300.0*9.81);
vykoef=(1000.0*GYKOEF-dpdy)/(2300.0*9.81);
if(vxkoef>2.0) vxkoef=2.0;
if(vxkoef<-2.0) vxkoef=-2.0;
if(vykoef>2.0) vykoef=2.0;
if(vykoef<-2.0) vykoef=-2.0;
/* Recalc velocity */
vxwater=vywater*vxkoef;
vywater*=vykoef;
}
else
/* Fluid in water */
{
vxwater=0;
vywater=-ABSV(vywater);
}
}

/* Motion Calc ///////////////////////////////// */
/* Vx, Vy, EpsII Simple calc */
if(markmod==1)
{
/* Interpolate velocity, pressure?, EE(eii), and ESP (spin) */
// These marker values are not stored, they are used in this routine to move each marker in private, thats it.
allinteriomp(markx[mm1],marky[mm1],m10,m20,&vx0,&vy0,&pr0,&sp0,&ee0);
vx0+=vxwater;
vy0+=vywater;
/**/
/* fprintf(fp_log,"SIMPLE %ld %d %e %e %e %e %e",mm1,markt[mm1],markx[mm1],marky[mm1],vx0,vy0,sp0); getchar(); */
}
/* Vx, Vy, EpsII 4 Runge-Kutta koef calc */
else
{
/* Classical RK4: evaluate the velocity field at the start point, two
   midpoints, and the end point, then combine with weights 1-2-2-1 */
allinteriomp(markx[mm1],marky[mm1],m10,m20,&vx1,&vy1,&pr1,&sp1,&ee1);
vx1+=vxwater;
vy1+=vywater;
/**/
//fprintf(fltest,"RK4 %ld %d %e %e %e %e %e %e \n",mm1,markt[mm1],markx[mm1],marky[mm1],vx1,vy1,sp1,ee1);
/**/
allinteriomp(markx[mm1]+vx1*timestep/2.0,marky[mm1]+vy1*timestep/2.0,m10,m20,&vx2,&vy2,&pr2,&sp2,&ee2);
vx2+=vxwater;
vy2+=vywater;
/**/
allinteriomp(markx[mm1]+vx2*timestep/2.0,marky[mm1]+vy2*timestep/2.0,m10,m20,&vx3,&vy3,&pr3,&sp3,&ee3);
vx3+=vxwater;
vy3+=vywater;
/**/
allinteriomp(markx[mm1]+vx3*timestep,marky[mm1]+vy3*timestep,m10,m20,&vx4,&vy4,&pr4,&sp4,&ee4);
vx4+=vxwater;
vy4+=vywater;
/**/
/* Vx,Vy, EpsXX, EpsYY, EpsXY calc after Runge-Kutta */
vx0=(vx1+2.0*vx2+2.0*vx3+vx4)/6.0;
vy0=(vy1+2.0*vy2+2.0*vy3+vy4)/6.0;
if(markmod==2)
{
sp0=(sp1+2.0*sp2+2.0*sp3+sp4)/6.0;
ee0=(ee1+2.0*ee2+2.0*ee3+ee4)/6.0;
}
else
{
sp0=sp1;
ee0=ee1;
}
}

/* Orthogonal motion only: outside the grid, suppress the velocity
   component parallel to the crossed boundary */
if (outgrid==2)
{
if(markx[mm1]<0 || (markx[mm1])>xsize) vy0=0;
if(marky[mm1]<0 || (marky[mm1])>ysize) vx0=0;
}

/* Normal markers */
if(markt[mm1]<100)
{
/* Markers coming from below the model */
if(marky[mm1]>zdeep && markk[mm1]<tdeep) markk[mm1]=tdeep;
// If you do not want to apply a large temperature lower boundary condition use:
//if(marky[mm1]>zdeep && vy0<0 && markk[mm1]<tdeep) markk[mm1]=tdeep;

/* Normal markers */
/* X,Y calc after Runge-Kutta */
markx[mm1]+=(timestep*vx0);
marky[mm1]+=(timestep*vy0);
/* Accumulate finite strain only for already-strained markers */
if(marke[mm1]>0) { marke[mm1]+=(timestep*ee0); }
/* sp0 becomes the rotation angle increment for this timestep */
sp0*=timestep;
/* Turcotte & Schubert, 1995 rotation formula (full analytic rotation
   of the deviatoric stress tensor by angle sp0) */
if(stoksmod==1)
{
sp1=markxx[mm1]*cos(sp0)*cos(sp0)-markxx[mm1]*sin(sp0)*sin(sp0)+markxy[mm1]*sin(2.0*sp0);
sp3=0.5*(-markxx[mm1]-markxx[mm1])*sin(2.0*sp0)+markxy[mm1]*cos(2.0*sp0);
markxx[mm1]=sp1;
markxy[mm1]=sp3;
}
/* Jaumann corrotation formula (small-angle linearization) */
if(stoksmod==2)
{
sp1=markxx[mm1]+markxy[mm1]*2.0*sp0;
sp3=markxy[mm1]+0.5*(-markxx[mm1]-markxx[mm1])*2.0*sp0;
markxx[mm1]=sp1;
markxy[mm1]=sp3;
}

/* Out of grid marker reset */
if(markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize)
{
markk[mm1]=0;
markd[mm1]=-1.0;
markw[mm1]=-1.0;
marke[mm1]=0;
}
}
/* Immobile markers */
else
{
/* X,Y calc after Runge-Kutta */
// Which velocity is used here, if were before located outside of grid ...
markx[mm1]+=(timestep*vx0);
marky[mm1]+=(timestep*vy0);
/* Check new position, add marker at end (marknum1) */
// Immobile markers that now enter grid
if(markx[mm1]>=0 && marky[mm1]>=0 && markx[mm1]<=xsize && marky[mm1]<=ysize)
{
/* Serialize appends: marknum1 is shared across threads */
#pragma omp critical(newmark)
{
#pragma omp flush(marknum1)
/* Type save */
markt[marknum1]=markt[mm1]-100;
/* X,Y calc after Runge-Kutta */
// Give marker new location (within grid)
markx[marknum1]=markx[mm1];
marky[marknum1]=marky[mm1];
/* Temperature Reset */
markk[marknum1]=0;
markd[marknum1]=-1.0;
markv[marknum1]=0;
/* Strain Reset */
marke[marknum1]=0;
/* Stress Reset */
markxx[marknum1]=0;
markxy[marknum1]=0;
/* Pressure Reset */
markp[marknum1]=0;
/* Strain rate Reset */
markexx[marknum1]=0;
markexy[marknum1]=0;
/* Add aditional markers counter */
marknum1++;
/* X,Y reset for immobile marker (markk/markv store its home X,Y) */
markx[mm1]=markk[mm1];
marky[mm1]=markv[mm1];
// If new marker is interesting for picking algorithm, flag to follow
// Note is hard-coded in i2.c as well. Only here excluded fluid markers, since immobile can not become fluid
// NOTE(review): the checks below index marky[marknum1] etc. AFTER
// marknum1++ above, i.e. they inspect the slot of the NEXT marker
// to be added — looks like an off-by-one; confirm intent.
#if setup>9
if (start_cond==1 && marky[marknum1]<85e3 && markx[marknum1]>gx[m10_hr] && markx[marknum1]<gx[m11_hr] && markt[marknum1]>1 && markt[marknum1]<50)
{
follow[marknum1]=1;
// #pragma omp flush(nm)
nm++;
}
#endif
}
}
/* Check,Reset old position */
// Use markk and v as dummy from above, so dx and/or dy are 0 if marker is newly added
dx=markx[mm1]-markk[mm1];
dy=marky[mm1]-markv[mm1];
dy=pow(dx*dx+dy*dy,0.5);
/* if(dy>ystpy || (marky[mm1]<0 && vy0<0) || (marky[mm1]>ysize && vy0>0) || (markx[mm1]<0 && vx0<0) || (markx[mm1]>xsize && vx0>0)) */
// If moved by more than one cell, reset to old position ?
if(dy>ystpy)
{
/* X,Y reset for immobile marker */
markx[mm1]=markk[mm1];
marky[mm1]=markv[mm1];
}
}
/* End Motion Calc ///////////////////////////////// */
}
}
// End omp-section move markers

/* Mark num */
if(marknum1>MAXMRK) {fprintf(fp_log,"Space out in markx[]"); fflush(fp_log); exit(0);}

/* Reset aditional markers: recycle slots of markers that left the grid by
   moving the last appended marker into the vacated slot */
mm1=0;
while(marknum1>marknum && mm1<marknum)
{
/* Reload marker */
if((markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize) && markt[mm1]<100)
{
/* Decrease aditional markers counter */
marknum1--;
/* Type save */
markt[mm1]=markt[marknum1];
/* Temperature Reset */
markk[mm1]=0;
markd[mm1]=-1.0;
/* Strain Reset */
marke[mm1]=0;
/* Stress Reset */
markxx[mm1]=0;
markxy[mm1]=0;
/* Pressure Reset */
markp[mm1]=0;
/* Strain rate Reset */
markexx[mm1]=0;
markexy[mm1]=0;
/* X,Y reload */
markx[mm1]=markx[marknum1];
marky[mm1]=marky[marknum1];
}
/* Increase markers counter */
mm1++;
}
fprintf(fp_log,"\n Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1);fflush(fp_log);
/* Set new marker number */
marknum=marknum1;
/* Incr cycle of sedimentation */
sedimnum++;
}
/* End OMP move markers by using Simple/Runge-Kutta method */

/* ro[],nu[] recalc after marker positions */
/* Re-interpolates all material properties (density, viscosity, stresses,
   heat-transport coefficients, temperature, momentum) from markers to the
   staggered grid nodes after advection; also applies phase transitions and
   erosion/melting bookkeeping per marker. */
void ronurecalcomp()
{
/* Counters */
long int m1,m2,m3,m10,m20;
int
mm2,yn,mm3,n1,n2,ncount=0,nt,tid;
long int mm1;
double dx,dy,swt,swt1,celdx,celdy;
double wro,mnu,mgg,maa,mdro,msxxe,msxye,mexxe,mexye,mro,mcp,mkt,mht,mbb,mdi0,mdi1,mwa,dmwa,mxmelt,mhlatent;
double Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt;
// Here in this loop epsin is 2nd invariant of visco-plastic strainrate
double sigin,epsin;
/* TD Database variables, dTK,dPB - TK, PB step for tabulation in TD database */
double H0,H1,H2,H3,R0,R1,R2,R3,G0,G1,G2,G3,W0,W1,W2,W3,dTK=20.0,dPB=1000.0,n,e;
/* Phase transition variables */
double p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in;
/* RO, NU equations var */
double mpb=1.0,mtk=300.0,numax=0,numin=0;
double start,xwall,b1,b2,slope_wall,gelbeg;

start=omp_get_wtime();
Mgg=Mro=Mwa=Mcp=Mbb=Maa=Mdhh=Mkt=0;
if (printmod) fprintf(fp_log,"\n Number of nodes = %ld Number of markers = %ld \n",nodenum,marknum); fflush(fp_log);
#pragma omp parallel
{nt=omp_get_num_threads();}

/* Layering on sediments: alternate sediment type 3/4 each sedimcyc cycles */
m1=(long int)(sedimnum/sedimcyc);
m2=((long int)(m1/2))*2;
if(m2==m1) yn=3; else yn=4;

/* ADD MARKERS TO THE v-CELLS ========================== */
/* Clear ro[],nu[] wt: zero all nodal accumulators and their weights */
for (m1=0;m1<nodenum;m1++)
{
ro0[m1]=0;
et0[m1]=0;
nu0[m1]=0;
nd0[m1]=0;
gg0[m1]=0;
gd0[m1]=0;
sxxe0[m1]=0;
sppe0[m1]=0;
sbritn0[m1]=0; // yield stress
sxye0[m1]=0;
exxe0[m1]=0;
exye0[m1]=0;
dro0[m1]=0;
drp0[m1]=0;
cp0[m1]=0;
kt0[m1]=0;
ht0[m1]=0;
tk0[m1]=0;
mrx0[m1]=0;
mry0[m1]=0;
mvx0[m1]=0;
mvy0[m1]=0;
sol0[m1]=0;
sol0[nodenum+m1]=0;
sol0[nodenum2+m1]=0;
sol1[m1]=0;
sol1[nodenum+m1]=0;
sol1[nodenum2+m1]=0;
}

#if setup>9
/* (1) Erosion-sedimentation and melting account for all markers */
#pragma omp parallel for shared(markx,marky,markk,markt,marke,markd,marknum,waterlev,erosmod,deserp,dyserp,xsize,ysize,gx,gy,xnumx,ynumy,ep,pr,timesum,res_high) \
private(mm1,mm2,m3,mtk,mpb,m10,m20,mxmelt,mhlatent) \
firstprivate(yn) \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1++)
{
/* Check markers out of grid */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize &&
(marky[mm1])<ysize && markk[mm1]>0 && markt[mm1]<50)
{
/* Up Left Node X,Y Num */
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
m3=m10*ynumy+m20;
mm2=(int)markt[mm1];
/* Erosion/sedimentation account */
if(erosmod) erosmarkomp(mm1,yn,m10,markx[mm1],marky[mm1],markt,marke,markd);
/* Water/Air account */
if(markt[mm1]<2)
{
/* Change marker type */
if((marky[mm1])>waterlev) markt[mm1]=1; else markt[mm1]=0;
}
/* P, T parameters calc */
// 1e-5 since convert to bars for melting look-up?
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m10,m20);
mtk=markk[mm1];
// Remove initial weak zone for subduction initiation after X My
if(timesum>(20.0e6*3.15576e+7) && markt[mm1]==12) {markt[mm1]=9;}
/* Serpentinization of brittle mantle faults at sub-surface */
/* NOTE(review): the condition repeats markt[mm1]==9 three times —
   likely a typo for three different rock types (e.g. 9||10||11);
   as written it is equivalent to a single ==9 test. Confirm intent. */
if((markt[mm1]==9 || markt[mm1]==9 || markt[mm1]==9) && marke[mm1]>deserp && marky[mm1]<dyserp)
{
/* Mantle to Antigorite transformation */
markt[mm1]=13;
markd[mm1]=-1.0;
}
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m10,markt);
/* Rocks to rock+melt transformation */
// Note markt passes in address of first element of array to function and allows for modification there
if (meltmod) meltingomp(mtk,mpb,mm1,mm2,markt,marke,&mxmelt,&mhlatent);
}
}
// End OMP section erosion-sedimentation

// Open file for storing marker interface data
if (n0==1)
{
flfric = fopen(fileTxtOutputFric,"w");
fprintf(flfric," rocktype markx marky markpressure sbrit markxx markxy \n");
}
#endif
if (printmod==10000) fprintf(fp_log,"\n Time taken for erosmark/antigor/melting in ronurecalc = %e s \n",omp_get_wtime()-start);
start=omp_get_wtime();

/* (2) Add ro[] nu[] etc.
using selected markers */
#pragma omp parallel shared(marknum,gridmod,markx,marky,markk,markt,erosmod,sedilev,markw,markd,eroslev, \
waterlev,zdeep,densimod,markht,marke,markexx,markexy,markxx,markxy,markp, \
nodenum,nodenum2,markvx,markvy,markv,markwa,markim,xsize,ysize,gx,gy,xnumx,ynumy,ep,pr,vx,vy,markf0,markf1,markbb,markaa,markro,markgg, \
markn0,markn1,marks0,marks1,marknu,markcp,markkt,markkf,markkp,nubeg,nuend,strmin,strmax,\
exy,esp,exx,pbmin,pbstp,pbnum,tkmin,tkstp,tknum,pbmin1,pbstp1,pbnum1,tkmin1,tkstp1,tknum1,timesum,td, \
zmpor,tkpor,markll,hidrl,hidry,lambfld,marka0,markb0,marke1,marka1,markb1,marke0,msbrit,msii_old, \
tk_updipsez0,tk_updipsez1,mgamma_vw,mgamma_vs,mvc_vs,mvc_vw,mus_vs,markdh,markdv,markss,markmm,timestepe,cyc0max,start_cond,veldepfric,stoksmod,res_high) \
private(mm1,mm2,tid,m10,m20,m1,m3,mpb,mtk,mro,mbb,maa,mcp,mkt,mht,mnu,mgg,mxmelt,mhlatent,mwa,wro,dmwa,mdi0,mdi1,mdro, \
celdx,celdy,swt,swt1,dx,dy,msxxe,msxye,mexxe,mexye,sigin,epsin,Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt,p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in) \
firstprivate(yn)
{
// Initialize temporarily interpolation arrays (capitilized) to zero (inside pragma, so is private)
// Each thread accumulates into its own calloc'ed copies, which are summed
// into the shared nodal arrays under a critical section further below.
double* Nu0 = (double*) calloc(nodenum,sizeof(double));
double* Nd0 = (double*) calloc(nodenum,sizeof(double));
double* Gg0 = (double*) calloc(nodenum,sizeof(double));
double* Gd0 = (double*) calloc(nodenum,sizeof(double));
double* Ro0 = (double*) calloc(nodenum,sizeof(double));
double* Sxxe0 = (double*) calloc(nodenum,sizeof(double));
double* Sppe0 = (double*) calloc(nodenum,sizeof(double));
double* Sbritn0 = (double*) calloc(nodenum,sizeof(double));
double* Sxye0 = (double*) calloc(nodenum,sizeof(double));
double* Exxe0 = (double*) calloc(nodenum,sizeof(double));
double* Exye0 = (double*) calloc(nodenum,sizeof(double));
double* Et0 = (double*) calloc(nodenum,sizeof(double));
double* Dro0 = (double*) calloc(nodenum,sizeof(double));
double* Drp0 = (double*) calloc(nodenum,sizeof(double));
double* Cp0 = (double*) calloc(nodenum,sizeof(double));
double* Kt0 = (double*) calloc(nodenum,sizeof(double));
double* Ht0 = (double*) calloc(nodenum,sizeof(double));
double* Tk = (double*) calloc(nodenum,sizeof(double));
double* Mrx0 = (double*) calloc(nodenum,sizeof(double));
double* Mry0 = (double*) calloc(nodenum,sizeof(double));
double* Mvx0 = (double*) calloc(nodenum,sizeof(double));
double* Mvy0 = (double*) calloc(nodenum,sizeof(double));
double* Sol0 = (double*) calloc(nodenum*3,sizeof(double));
double* Sol1 = (double*) calloc(nodenum*3,sizeof(double));

#pragma omp for \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1+=gridmod)
{
/* Check markers out of grid */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markk[mm1]>0 && markt[mm1]<50)
{
tid=omp_get_thread_num();
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
/* Marker type */
mm2=(int)markt[mm1];
if (mm2>=100) mm2-=100;
#if setup>9
/* 1a. --- Remove water, rocks --- */
if(erosmod==0)
{
if(marky[mm1]>sedilev && mm2<2)
{
mm2=yn;
markt[mm1]=yn;
markw[mm1]=0;
markd[mm1]=-1.0;
}
if(marky[mm1]<eroslev && mm2>1)
{
if((marky[mm1])>waterlev) markt[mm1]=1; else markt[mm1]=0;
mm2=markt[mm1];
markw[mm1]=0;
markd[mm1]=-1.0;
}
}
/* 1b. Remove Plumes */
if(marky[mm1]>zdeep && mm2!=10)
{
mm2=10;
markt[mm1]=10;
markw[mm1]=0;
markd[mm1]=-1.0;
}
#endif
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m10,m20);
mtk=(markk[mm1]);
/* Reset water/air temperature */
// if (mm2<2) mtk=markk[mm1]=273.0;
/* 2.-3. --- Calculate density --- */
// & just points to address of normally defined variables at start of this routine; more efficient for passing! this way variable here can be changed inside subroutine
#if setup>9
dencalcomp(mtk,mpb,markx[mm1],marky[mm1],mm2,&mro,&mbb,&maa);
mcp=markcp[mm2];
mkt=(markkt[mm2]+markkf[mm2]/(mtk+77.0))*exp(markkp[mm2]*mpb);
/* ========================= */
/* Mantle phase transitions */
/* ========================= */
if (densimod==1)
{
/* if(mm2>=9 && mm2<=14 && markex[mm1]>0) mro*=1.0-0.04*markex[mm1]; */
/* Eclogitization, St, Pv transitions in oceanic crust */
if(mm2==7 || mm2==8 )
{
/* Eclogitization Ito and Kennedy, 1971 */
/*basalt=>garnet granulite (Ga-In) transition*/
p_ga_in=-9222.0+mtk*14.0;
/*Not to have granulites at pressure lower than 2 kbar*/
if(p_ga_in<2000.0) p_ga_in=2000.0;
/*garnet granulite=>eclogite (Pl-Out) transition*/
p_pl_out=-1460.0+mtk*20.0;
/*Not to have eclogites at pressure lower than 12 kbar*/
if(p_pl_out<12000.0) p_pl_out=12000.0;
if(mpb>p_ga_in)
{
/* Density increase ramps linearly with T between teclmin..teclmax
   and with P between Ga-in and Pl-out */
rokf=0;
if(mtk>teclmin)
{
if(mtk>teclmax)
{
rokf=0.16;
}
else
{
rokf=0.16*(mtk-teclmin)/(teclmax-teclmin);
}
}
if(mpb>=p_pl_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_ga_in)/(p_pl_out-p_ga_in));
}
}
/* Coe->St transition Gerya et al., 2004, PCM */
p_st_in=59100.0+mtk*22.6;
if(mpb>p_st_in) mro*=1.06;
/* Pv transition, Mishin et al., 2008 with slope from Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=354000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=352000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.08;
if(mpb>=p_sp_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Ol-Sp and Pv transitions in the mantle */
if(mm2>=9 && mm2<=14)
{
/* Ol-Sp transition, Katsura & Ito, 1989 */
/* Ol-out transition*/
p_ol_out=91000.0+mtk*27.0;
/* Sp-in transition*/
p_sp_in=66000.0+mtk*39.0;
/*Limit width of Sp-Ol transition to 2 kbar */
if(p_sp_in>p_ol_out-2000.0) p_sp_in=p_ol_out-2000.0;
if(mpb>p_sp_in)
{
rokf=0.06;
if(mpb>=p_ol_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_sp_in)/(p_ol_out-p_sp_in));
}
}
/* Pv transition, Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=304000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=302000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.11;
if(mpb>=p_sp_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
}
/* ========================== end */
/* Test Heat conductivity k=ko/(1+b*(T-To)/To) */
if (markkt[mm2]<0) mkt=-markkt[mm2]/(1.0+markkf[mm2]*(mtk-markkp[mm2])/markkp[mm2]);
mht=markht[mm2];
/* 4. Molten rocks */
// Note: Calls viscalc only for melted rocks here !
if (mm2>20)
{
meltpartomp(mtk,mpb,markx[mm1],marky[mm1],mm1,mm2,&mro,&mbb,&maa,&mnu,&mcp,&mkt,&mgg,&mxmelt,&mhlatent);
}
/* ~X Thermodynamic database use for water */
// ATAT QUESTION TARAS: I would suggest to remove this if-statement, since it is harldy use. Do you agree? To avoid what was it included?
/* Density && Water wt% save */
// Hardly used; something that is larger than air or water in rock type number, but has negative density, at very start of model..
if ( densimod==3 && mm2>1 && (timesum<=1e+11 || markd[mm1]<=0) )
{
// tdbasecalc(mtk,mpb,mm2,mm1);
// markw[mm1]=eps[42];
// markd[mm1]=mro;
// ATATOMP QUESTION TARAS: the above form of mro means that it comes from dencalcomp or meltpartomp, and not from tdbasecalc above that stored is as eps.. ; what you wanted ?
// Use capital letters since Taras does not always assign eps value into this loop for m.. (eg mkt in next densimod2 call few lines below)
// ATATOMP QUESTION TARAS: Is this intentionally? With what purpose ? eg for mkt in next densimod2 call few lines below
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m10,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
markw[mm1]=Mwa;
markd[mm1]=mro;
}
#elif setup<10
// In lab have constant density per rocktype and no thermal evolution, so much faster
mro=markro[mm2];
#endif
/* ---> (3) Marker rheology: calculate viscosity and stresses <--- */
mdi0=0;
mdi1=1.0;
#if setup>9
if(mm2<=20)
#endif
{
// yn = 1 now means that plasticity IS executed in this routine call to viscalc! This is the only successfull call within current code for normal, non-melting rocks !
viscalcomp(mtk,mpb,markx[mm1],marky[mm1],markv[mm1],markwa[mm1],markk[mm1],markp[mm1],markt[mm1],markexx[mm1],markexy[mm1],markxx,markxy,marke,mm1,mm2,1,m10,&mnu,&mdi0);
/* XXX Density correction for the dilation angle XXX = not executed */
if(markf0[mm2]>0 && markf1[mm2]>0 && marke[mm1]>0)
{
/* Second invariant of viscoplastic strain calc, check */
sigin=pow(markxx[mm1]*markxx[mm1]+markxy[mm1]*markxy[mm1],0.5);
epsin=marke[mm1]-sigin/2.0/markgg[mm2];
if(epsin>markf1[mm2]) epsin=markf1[mm2];
if(epsin>0) mdi1=exp(-2.0*epsin*markf0[mm2]);
}
}
msxxe=markxx[mm1];
msxye=markxy[mm1];
mexxe=markexx[mm1];
mexye=markexy[mm1];
mgg=markgg[mm2];
/* Min,Max NU limitation */
if(mnu<nubeg) mnu=nubeg;
if(mnu>nuend) mnu=nuend;
/* Water/Air account */
#if setup>9
if(mm2<2)
{
markd[mm1]=mro;
mdi0=0;
mdi1=1.0;
}
#endif
/* End Water/Air account */
/* Calc log density derivative, save new density */
if(markd[mm1]<=0 || densimod==0) {markd[mm1]=mro; mdi0=0;}
/* NOTE(review): mdro/maa/mdi1 are unconditionally reset just below, so the
   timestepe branch's mdro value is overwritten by mdro=-mdi0 — the
   log-density computation appears dead; confirm whether intended. */
mdro=0;
maa=0;
mdi1=1.0;
#if setup>9
if(timestepe)
{
mdro=mro/markd[mm1];
mdro=log(mdro)-mdi0;
//if(epsin>0 && debugmod) {fprintf(fp_log,"d %ld %d %e %e %e %e %e %e %e %e %e %e",mm1,mm2,markx[mm1],marky[mm1],marke[mm1]*2.0*markgg[mm2],sigin,marke[mm1],epsin,-2.0*epsin*markf0[mm2],markd[mm1],mro,mdro);getchar();}
}
#endif
/* Save new density */
mdro=-mdi0;
markd[mm1]=mro;
/* Correct new density for dilation */
mro*=mdi1;
/* Saving marker viscosity */
markv[mm1]=mnu;
// if(debugmod) {fprintf(fp_log,"num=%ld type=%d x=%e y=%e mpb=%e mtk=%e nu=%e ro=%e cp=%e kt=%e ht=%e",mm1,mm2,markx[mm1],marky[mm1],mpb,mtk,mnu,mro,mcp,mkt,mht);getchar()};
/* --> (4) Interpolation from markers to 4 corners of the cell ====================================*/
/* Marker weight calculation using dimension of current Cell */
celdx=gx[m10+1]-gx[m10];
celdy=gy[m20+1]-gy[m20];
swt1=1.0/celdx/celdy;
/* Marker weights calculation using dimension of current Cell
   (celdx,celdy become the normalized [0,1] position inside the cell) */
celdx=(markx[mm1]-gx[m10])/(gx[m10+1]-gx[m10]);
celdy=(marky[mm1]-gy[m20])/(gy[m20+1]-gy[m20]);
if (celdx<0 || celdy<0 || celdx>1.0 ||celdy>1.0) {fprintf(fp_log," WARNING !!! num=%ld type=%d x=%e y=%e celdx=%e celdy=%e",mm1,mm2,markx[mm1],marky[mm1],celdx,celdy); fflush(fp_log); getchar();}
/* --- Interpolate ro,nu etc to nodes using interpolation coefficients --- */
for (m1=0;m1<4;m1++)
{
/* Marker weight calculation using dimension of current Cell */
/* Different corners */
/* 0 2 */
/* 1 3 */
switch(m1)
{
case 0:
/* Calc node number */
m3=m10*ynumy+m20;
/* Add shear viscosity Nu (only when marker is in the nearest quadrant) */
if (celdx<0.5 && celdy<0.5)
{
dx=1.0-2.0*celdx;
dy=1.0-2.0*celdy;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vx and Mx from markers */
if (celdx<0.5)
{
dx=1.0-celdx;
dy=1.0-ABSV(celdy-0.5);
swt=swt1*dx*dy;
Mvx0[m3]+=markvx[mm1]*mro*swt;
Mrx0[m3]+=mro*swt;
Sol0[nodenum2+m3]+=swt;
}
/* Add Vy and My from markers */
if (celdy<0.5)
{
dx=1.0-ABSV(celdx-0.5);
dy=1.0-celdy;
swt=swt1*dx*dy;
Mvy0[m3]+=markvy[mm1]*mro*swt;
Mry0[m3]+=mro*swt;
Sol1[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*(1.0-celdx)*(1.0-celdy);
break;
case 1:
/* Calc node number */
m3=m10*ynumy+m20+1;
/* Add shear viscosity Nu */
if (celdx<0.5 && celdy>0.5)
{
dx=1.0-2.0*celdx;
dy=2.0*celdy-1.0;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vy and My from markers */
if (celdy>0.5)
{
dx=1.0-ABSV(celdx-0.5);
dy=celdy;
swt=swt1*dx*dy;
Mvy0[m3]+=markvy[mm1]*mro*swt;
Mry0[m3]+=mro*swt;
Sol1[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*(1.0-celdx)*celdy;
break;
case 2:
/* Calc node number */
m3=(m10+1)*ynumy+m20;
/* Add shear viscosity Nu, Sxy */
if (celdx>0.5 && celdy<0.5)
{
dx=2.0*celdx-1.0;
dy=1.0-2.0*celdy;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vx and Mx from markers */
if (celdx>0.5)
{
dx=celdx;
dy=1.0-ABSV(celdy-0.5);
swt=swt1*dx*dy;
Mvx0[m3]+=markvx[mm1]*mro*swt;
Mrx0[m3]+=mro*swt;
Sol0[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*celdx*(1.0-celdy);
break;
case 3:
/* Calc node number */
m3=(m10+1)*ynumy+m20+1;
/* Add shear viscosity Nu */
if (celdx>0.5 && celdy>0.5)
{
dx=2.0*celdx-1.0;
dy=2.0*celdy-1.0;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
// Add values to central node once
// Brackets determine the scope of the to here limited variables, but currently make no difference
{
dx=1.0-2.0*ABSV(celdx-0.5);
dy=1.0-2.0*ABSV(celdy-0.5);
swt=swt1*dx*dy;
Nd0[m3]+=mnu*swt;
Gd0[m3]+=mnu/mgg*swt;
Sxxe0[m3]+=msxxe*swt;
Sppe0[m3]+=markp[mm1]*swt;
Sbritn0[m3]+=msbrit[mm1]*swt; // Yield stress in the pressure node
Exxe0[m3]+=mexxe*swt;
Dro0[m3]+=mdro*swt;
Drp0[m3]+=maa*swt;
Sol1[nodenum+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*celdx*celdy;
break;
} // End switch of weight calculation
/* Add Physical Properties: ro,nu, etc. */
// fprintf(fp_log,"num=%ld type=%d x=%e y=%e cell=%ld dx=%e dy=%e swt=%e",mm1,mm2,markx[mm1],marky[mm1],m3,dx,dy,swt);getchar();
// nu0[m3]+=mnu*swt;
// ATAT TARAS Why use mcp*MRO? in routines calculate purely from markcp... not done in manuele's, later in heat mrocp, but unrelated
Ro0[m3]+=mro*swt;
Et0[m3]+=mbb*swt;
Cp0[m3]+=mcp*mro*swt;
Kt0[m3]+=mkt*swt;
Ht0[m3]+=mht*swt;
Sol0[m3]+=swt;
/* Add T */
if(!markim[mm2])
{
Tk[m3]+=mtk*swt;
Sol1[m3]+=swt;
}
}
/* End Interpolation from markers to nodes ====================================*/
}
}
// Add interpolation arrays from different processors and free their memory
#pragma omp critical (sumsolarrays)
{
for (m3=0;m3<nodenum;m3++)
{
nu0[m3]+=Nu0[m3];
nd0[m3]+=Nd0[m3];
gd0[m3]+=Gd0[m3];
gg0[m3]+=Gg0[m3];
ro0[m3]+=Ro0[m3];
cp0[m3]+=Cp0[m3];
kt0[m3]+=Kt0[m3];
ht0[m3]+=Ht0[m3];
dro0[m3]+=Dro0[m3];
drp0[m3]+=Drp0[m3];
sxxe0[m3]+=Sxxe0[m3];
sxye0[m3]+=Sxye0[m3];
sppe0[m3]+=Sppe0[m3];
sbritn0[m3]+=Sbritn0[m3];
exxe0[m3]+=Exxe0[m3];
exye0[m3]+=Exye0[m3];
et0[m3]+=Et0[m3];
tk0[m3]+=Tk[m3];
mrx0[m3]+=Mrx0[m3];
mry0[m3]+=Mry0[m3];
mvx0[m3]+=Mvx0[m3];
mvy0[m3]+=Mvy0[m3];
sol0[m3]+=Sol0[m3];
sol1[m3]+=Sol1[m3];
sol0[nodenum+m3]+=Sol0[nodenum+m3];
sol1[nodenum+m3]+=Sol1[nodenum+m3];
sol0[nodenum2+m3]+=Sol0[nodenum2+m3];
sol1[nodenum2+m3]+=Sol1[nodenum2+m3];
}
}
// Free dynamically allocated interpolation arrays
free(Nu0);
free(Nd0);
free(Gg0);
free(Gd0);
free(Ro0);
free(Sxxe0);
free(Sppe0);
free(Sbritn0);
free(Sxye0);
free(Exxe0);
free(Exye0);
free(Et0);
free(Dro0);
free(Drp0);
free(Cp0);
free(Kt0);
free(Ht0);
free(Tk);
free(Mrx0);
free(Mry0);
free(Mvx0);
free(Mvy0);
free(Sol0);
free(Sol1);
} // End OMP section marker to node interpolation
#if setup>9
if (n0==1){fclose(flfric);}
#endif
if (printmod==10000) fprintf(fp_log,"\n Time taken for rho and vis calc + M->N1 in ronurecalc = %e s \n",omp_get_wtime()-start);
start=omp_get_wtime();
/* Recalculate ro[] nu[]: divide each accumulated nodal sum by its weight */
for (m1=0;m1<xnumx;m1++)
{
for (m2=0;m2<ynumy;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
/* Shear viscosity recalc check */
if(sol0[nodenum+m3])
{
// Boundary Condition Viscosity (set in mu): applied early in the run
// (timesum<timebond) or permanently near the grid edges
if(mu[m3] && (timesum<timebond || m1<=2 || m2<=2 || m1>=xnumx-4 || m2>=ynumy-3))
{
// BC value defined in init.t3c
// mu>0: fixed viscosity; mu<0: upper cap of -mu on the interpolated value
if(mu[m3]>0)
{
nu0[m3]=mu[m3];
}
else
{
nu0[m3]/=sol0[nodenum+m3];
if(nu0[m3]>-mu[m3]) nu0[m3]=-mu[m3];
}
}
// Rest; solution
else
{
nu0[m3]/=sol0[nodenum+m3];
}
/* Min,Max NU limitation */
if(nu0[m3]<nubeg) nu0[m3]=nubeg;
if(nu0[m3]>nuend) nu0[m3]=nuend;
/* Min,Max NU definition for nu contrast limit */
if(numin==0 || nu0[m3]<numin) numin=nu0[m3];
if(numax==0 || nu0[m3]>numax) numax=nu0[m3];
nu[m3]=nu0[m3];
/* Elastic shear stress Sxy recalc */
sxye[m3]=sxye0[m3]/sol0[nodenum+m3];
exye[m3]=exye0[m3]/sol0[nodenum+m3];
/* Shear shear modulus recalc */
gg[m3]=nu[m3]/(gg0[m3]/sol0[nodenum+m3]);
/* Reset weight */
sol0[nodenum+m3]=0;
}
/* Normal viscosity recalc check */
if(sol1[nodenum+m3])
{
if(mu[m3] && (timesum<timebond || m1<=2 || m2<=2 || m1>=xnumx-4 || m2>=ynumy-3))
{
if(mu[m3]>0)
{
nd0[m3]=mu[m3];
}
else
{
nd0[m3]/=sol1[nodenum+m3];
if(nd0[m3]>-mu[m3]) nd0[m3]=-mu[m3];
}
}
else
{
nd0[m3]/=sol1[nodenum+m3];
}
/* Min,Max NU limitation */
if(nd0[m3]<nubeg) nd0[m3]=nubeg;
if(nd0[m3]>nuend) nd0[m3]=nuend;
/* Min,Max NU definition for nu contrast limit */
if(numin==0 || nd0[m3]<numin) numin=nd0[m3];
if(numax==0 || nd0[m3]>numax) numax=nd0[m3];
nd[m3]=nd0[m3];
/* Elastic Normal stress recalc */
sxxe[m3]=sxxe0[m3]/sol1[nodenum+m3];
sppe[m3]=sppe0[m3]/sol1[nodenum+m3];
sbritn[m3]=sbritn0[m3]/sol1[nodenum+m3];
exxe[m3]=exxe0[m3]/sol1[nodenum+m3];
/* Density changes recalc */
dro[m3]=dro0[m3]/sol1[nodenum+m3];
drp[m3]=drp0[m3]/sol1[nodenum+m3];
/* Normal shear modulus recalc */
gd[m3]=nd[m3]/(gd0[m3]/sol1[nodenum+m3]);
/* Reset weight */
sol1[nodenum+m3]=0;
}
/* Vx Mx recalc check */
if(sol0[nodenum2+m3])
{
/* Material constants recalc (density-weighted mean velocity) */
mvx[m3]=mvx0[m3]/mrx0[m3];
mrx[m3]=mrx0[m3]/sol0[nodenum2+m3];
sol0[nodenum2+m3]=0;
}
/* Vy My recalc check */
if(sol1[nodenum2+m3])
{
/* Material constants recalc */
mvy[m3]=mvy0[m3]/mry0[m3];
mry[m3]=mry0[m3]/sol1[nodenum2+m3];
sol1[nodenum2+m3]=0;
}
/* Other variables recalc check */
if(sol0[m3])
{
/* Material constants recalc */
ro[m3]=ro0[m3]/sol0[m3];
#if setup>9
if(gy[m2]<waterlev && ro[m3]<1000.1) ro[m3]=1.0;
if(gy[m2]>=waterlev && ro[m3]<1000.1) ro[m3]=1000.0;
#endif
et[m3]=et0[m3]/sol0[m3];
cp[m3]=(cp0[m3]/sol0[m3])/ro[m3];
kt[m3]=kt0[m3]/sol0[m3];
ht[m3]=ht0[m3]/sol0[m3];
/* Advective addition for T K in nodes recalc */
if (sol1[m3])
{
tk[m3]=tk0[m3]/sol1[m3];
sol1[m3]=0;
}
/* Reset weight */
sol0[m3]=0;
}
}
}
if (printmod) fprintf(fp_log,"Min, Max viscosity %e %e \n",numin,numax); fflush(fp_log);
/* Reset advective temperature */
for (m3=0;m3<nodenum;m3++) {tk3[m3]=0;}
/* Check Upper/Lower limits for nu[] after given contrast */
if(nucontr>1.0 && numin>0) numax=numin*nucontr;
if(nucontr<1.0 && numax>0) numin=numax*nucontr;
for (m3=0;m3<nodenum;m3++)
{
if(nu[m3]<numin) nu[m3]=numin;
if(nu[m3]>numax) nu[m3]=numax;
if(nd[m3]<numin) nd[m3]=numin;
if(nd[m3]>numax) nd[m3]=numax;
}
/* Water/air density */
#if setup>9
for (m1=0;m1<xnumx;m1++)
for (m2=0;m2<ynumy;m2++)
{
m3=m1*ynumy+m2;
if(gy[m2]<waterlev && ro[m3]<1000.1) ro[m3]=1.0;
if(gy[m2]>=waterlev && ro[m3]<1000.1) ro[m3]=1000.0;
}
#endif
/* ---> 5.
Set Boundary conditions for T <---*/ if (printmod) fprintf(fp_log,"\n AVERAGE TEMPERATURE CORRECTION FOR BOUNDARY CONDITIONS ...\n"); fflush(fp_log); tkrecalc(); if (printmod) fprintf(fp_log,"AVERAGE TEMPERATURE OK!\n"); fflush(fp_log); /* Adiabate computing */ if(1==0 && timesum<3.15576e+7*1e+3) { /* Lower boundary TK - Node Cycle */ for (m1=0;m1<xnumx;m1++) { /* Cur Line Num in bondm[] */ m2=(m1+1)*ynumy-1; m3=bondm[m2+nodenum3]; if(m3) {bondv[m3][0]=tk[m2-1]*2.0-tk[m2-2];} } } if (printmod==10000) fprintf(fp_log,"\n Time taken for M->N2 in ronurecalc = %e s \n",omp_get_wtime()-start); /* END ADD MARKERS TO THE v-CELLS ========================== */ } /* End ro[],nu[] recalc after marker positions - routine */ /* Calc density for given P,T */ void dencalcomp(double mtk, double mpb, double x, double y, int mm2, double *mro, double *mbb, double *maa) /* mtk - T, K */ /* mpb - P, bar */ /* x,y - XY location of point for Vx,Vy calc */ /* mm2 - Rock number */ { /* Ro=ro0*(1-bro*(TK-298.15))*(1+aro*(Pkbar-0.001)) */ *mro=markro[mm2]*(1.0-markbb[mm2]*(mtk-298.15))*(1.0+markaa[mm2]*(mpb-1.0)*1e-3); /* Adiabatic term: al=bro/(1-bro*(Tk-298.15)) */ *mbb=markbb[mm2]/(1.0-markbb[mm2]*(mtk-298.15)); /* Compressibility: be=aro/(1+aro*(Pkbar-0.0001) */ *maa=1.e-8*markaa[mm2]/(1.0+markaa[mm2]*(mpb-1.0)*1e-3); /* Constant density */ if (densimod==0) *mro=markro[mm2]; } /* End OMP Calc density for given P,T */ /* OMP Antigorite weakening of mantle */ void antigoromp(double mtk, double mpb, double x, double y, long int mm1, long int m10, char markt[]) /* mtk - T, K */ /* mpb - P, bar */ /* x,y - XY location of point for Vx,Vy calc */ /* mm1 - mark number */ /* m10 - Up Left Node X,Y Num */ { /* Val buffer */ double k1,sy1,e,hydry,yfiltr,hydryl,tsubd,vxs,vys; /* Check marker type */ if(markt[mm1]!=11 && markt[mm1]!=13) return; /* Relativ Normalized coord Calc */ e=(x-gx[m10])/(gx[m10+1]-gx[m10]); /* Erosion surface; oceanic crust top */ sy1=(e*ep[m10+1]+(1.0-e)*ep[m10]); /* 
Antigorite weakening of mantle above oceanic crust */ /* Atg stability field after Schmidt and Poli, 1998 */ if((y-sy1)>63000.0) { k1=1013.17699-0.060387633e-3*(y-sy1)-0.004289442e-6*(y-sy1)*(y-sy1); } else { k1=751.490422+6.00773668e-3*(y-sy1)-0.034690759e-6*(y-sy1)*(y-sy1); } /* Change marker Type */ /* Serpentinized (13) - to hydrated (11) */ if(k1<=mtk && markt[mm1]==13) markt[mm1]=11; /* Hydrated(11) - to serpentinized (13) */ if(k1>mtk && markt[mm1]==11) markt[mm1]=13; } /* OMP End Antigorite weakening of mantle */ /* Nu calc after reological equation */ // Uses timestepe or computational visco-elastic timestep /* P-T-stress dependent rheology without/with brittle/ductile transition */ /* Reological equations */ /* Stress>SScr */ /* Power law dislocation creep: SSii={NU0*EEii*exp[(E+PV)/RT]}^(1/n) */ /* Effective viscosity: NU=1/2*{NU0*exp[(E+PV)/RT]}^(1/n)*EEii^[(1-n)/n] */ /* Stress<SScr */ /* Newtonian diffusion creep: SSii=NU1*EEii*exp[(E+PV)/RT] */ /* Effective viscosity: NU=NU0/2*exp[(E+PV)/RT] */ /* NU1=NU0/SScr^(n-1) */ /* SScr - dislocation, diffusion transition stress */ /* SSii - second invariant of deviatoric stress tensor */ /* EEii - epsin - second invariant of strain rate tensor */ /* E - activation energy, J */ /* V - activation volume, J/bar */ /* R - gase constant 8.314 J/K */ /* Viscosity NU calc after reological equations */ /* NU=SSii/(2*EEii) */ /* Brittle - Ductile transition */ /* sbrit=MINV(0.85e+5*pb,60e+6+0.6e+5*pb)*lambda; (Schott & Schmeling, 1998) */ /* sbrit=MINV(0.667e+5*pb,51.2e+6+0.512e+5*pb)*lambda; (Brace & Kohlsstedt, 1980) */ void viscalcomp(double mtk, double mpb, double cmx, double cmy, double Markv, double Markwa, double Markk, double Markp, double Markt, double Markexx, double Markexy, double Markxx[], double Markxy[], double Marke[], long int mm1, int mm2, int yn, long int m10,double *mnu, double *mdi0) /* mtk - T, K */ /* mpb - P, bar */ /* cmx,cmy - XY location of point for Vx,Vy calc */ /* mm1 - Marker number */ 
/* mm2 - rock type */
/* yn - plastic reset yes(1)/no(0) - switch from version 1 to 2 ! */
// bbrit_cor - slip velocity dependent correction for friction coefficient
/* mnu - OUT: effective visco-elasto-plastic viscosity, Pa s */
/* mdi0 - OUT: dilatational correction from plastic strain rate */
{
/* Val buffer */
double xnu,nnu,e,n,rt=8.314*mtk,k1,e1,epsin,sigin,sduct,sbrit,nueff,strain,abrit,bbrit,nubrit,nunewt,nupowl,nuduct;
/* Reological Eq par */
double sy1,lamb,xelvis,sxxnew,sxynew,siginnew,mnu0,mnu1,mnu2,siginnew0,siginnew1,siginnew2,dsiginnew0,dsiginnew1,dsiginnew2;
/* Counters */
long int m1;
int ncount=0;
// Slip velocity dependent friction
// NOTE(review): relvw is initialised to 60, outside its meaningful [0,1] range;
// it only becomes meaningful inside the veldepfric branch below, and the
// relvw<=1.0 file-output guard relies on this sentinel — confirm intent
double relvw=60,dvw,dvs;
/* Melted rocks */
// But incoming mm2 is already mm2_actual-20
#if setup>9
if (mm2>20) { *mnu = markn0[mm2]; return; }
#endif
/* Non-melted rocks, mm2 <= 20 */
/* Calc effective strain rate, stress after second strain rate Tenzor invariant EEii=(1/2SUM(EPSik^2))^(1/2) */
// Interpolation to these markers done at end of viterate() of previous timestep
epsin=pow(Markexx*Markexx+Markexy*Markexy,0.5);
sigin=pow(Markxx[mm1]*Markxx[mm1]+Markxy[mm1]*Markxy[mm1],0.5);
/* --- 1. Calculate components of brittle strength; cohesion, friction, and hydrostatic pore pressure weakening factor --- */
// - Lambda brittle weakening factor for hydrostatic pore pressure -
/* Up Left Node X,Y Num */
m1=m10;
/* Relative normalized coord calc */
e=(cmx-gx[m1])/(gx[m1+1]-gx[m1]);
/* n = erosion-surface elevation interpolated to the marker x-position */
n=(e*ep[m1+1]+(1.0-e)*ep[m1]);
// Pore fluid pressure correction: lamb = Pf/Ps
lamb=markll[mm2];
#if setup>9
// Predefine fluid pressures near surface: blend from hidrl at the surface
// to the rock-type value over the depth interval hidry
if ((cmy-n)<=0) lamb=hidrl;
if ((cmy-n)>0 && (cmy-n)<hidry) lamb=hidrl*(1.0-(cmy-n)/hidry)+lamb*(cmy-n)/hidry;
// Lower friction in fluid/melt present areas
if (Markwa==1) {lamb=lambfld;}
#endif
/* - Strain weakening - */
strain=Marke[mm1];
/* A,B coefficients calc depending on integral strain:
   linear interpolation between (marka0,markb0) and (marka1,markb1)
   over the strain interval [marke0, marke1] */
abrit=marka0[mm2];
bbrit=markb0[mm2];
if(strain>marke1[mm2])
{
abrit=marka1[mm2];
bbrit=markb1[mm2];
}
else
{
if(strain>marke0[mm2] && marke1[mm2]>marke0[mm2])
{
abrit=marka0[mm2]+(marka1[mm2]-marka0[mm2])*(strain-marke0[mm2])/(marke1[mm2]-marke0[mm2]);
bbrit=markb0[mm2]+(markb1[mm2]-markb0[mm2])*(strain-marke0[mm2])/(marke1[mm2]-marke0[mm2]);
}
}
/* --- End calculation of brittle strength components; cohesion, friction, and hydrostatic pore pressure weakening factor --- */
/* --- Start Peierl's creep viscosity calculation --------------------------------------*/
/**
 * \brief Calculates Peierls's creep viscosity.
 *
 * @param[in] mtk temperature, K
 * @param[in] mm2 rock type
 * @param[in] epsin second strain-rate invariant
 * @param[in] sigin second stress invariant
 *
 * @param[out] siginnew stress consistent with Peierls flow law
 * @param[out] nupeierls Peierls creep viscosity
 * @return The calculated Peierl's creep viscosity
 */
/* Peierls plasticity-creep mechanism, data from Katayama and Karato, 2008 */
double nupeierls = 0;
int peierls_on = 0;
/*
 * from function arguments: mm2, mtk,
 * declared previously: sigin, epsin
 */
/* Only active for high stress (>1e+8 Pa), cold (<1373 K), deforming rock */
if (global_peierls_creep && mm2 > 1 && mm2 < 20 && sigin > 1e+8 && mtk < 1373.0 && epsin > 0)
{
int n1 = 9;
peierls_on = 1;
/* Constant f(p,q), 1/s/MPa^2 */
double const A = pow(10.0,7.8) * 1e-12;
/* 9.1e+9 Dry Peierls stress at 0 K f(p,q), MPa Evans & Goetze, 1979 */
double sig0 = 9.1e+9;
/* Wet branch permanently disabled by the 0==1 guard */
if (0==1 && mm2!=9 && mm2!=10 && mm2!=14)
{
/* Wet Peierls stress at 0 K f(p,q), MPa
 * Katayama & Karato, 2008 */
sig0 = 2.9e+9;
n1 = 11;
}
/* Using bisection to solve the Peierls flow law for the stress giving epsin */
double sigmin = 1e+6;
double sigmax = sig0 * 0.9999;
/*
 * global variable, in OMP pragma: markdh, markdv, mpb
 */
/* exp argument clamped at -100 to avoid underflow */
double const k1min = A * pow(sigmin, 2.0) * exp(MAXV(-100.0,-(markdh[n1] + markdv[n1] * mpb) / rt * pow(1.0 - sigmin / sig0, 2.0)));
for (int n2 = 0; n2 < 15; n2++)
{
siginnew = (sigmax + sigmin) / 2.0;
/* siginnew=pow(sigmax*sigmin,0.5); */
k1 = A * pow(siginnew, 2.0) * exp(MAXV(-100, -(markdh[n1] + markdv[n1] * mpb) / rt * pow(1.0 - siginnew / sig0, 2.0)));
/* Keep the half-interval that brackets the target strain rate epsin */
if((k1<epsin && k1min<epsin) || (k1>epsin && k1min>epsin))
{
sigmin = siginnew;
}
else
{
sigmax = siginnew;
}
/*
 * p=1.0;
 * q=2.0;
 * k1 = A * pow(siginnew,2.0)*exp(-(markdh[n1]+markdv[n1]*mpb)/rt*pow(1.0-pow(siginnew/sig0,p),q));
 * getchar();
 * printf("%d %ld %d %e %e %e %e %e %e %e %e %e %e",n2,mm1,mm2,mtk,mpb,sigin,epsin,siginnew,k1,k1min,k1max,sigmin,sigmax);getchar();
 *
 */
//printf("%ld %d %e %e %e %e %e %e",mm1,mm2,mtk,mpb,sigin,epsin,siginnew,1.0/nupeierls);
//static double const p = 1.0, q = 2.0;
//printf("%d %ld %d %e %e %e %e %e %e %e %e %e %e",n2,mm1,mm2,mtk,mpb,sigin,epsin,siginnew,k1,k1min,k1max,sigmin,sigmax);
}
//nupeierls = 1.0 / (0.5 * siginnew / epsin);
// try not using the inverse viscosity
nupeierls = (0.5 * siginnew / epsin);
/*
 * if(nupeierls>1e-23 && (markx[mm1]<5e+4 || markx[mm1]>xsize-5e+4)) nupeierls=1e-23;
 */
}
/* --- End Peierl's creep viscosity calculation --------------------------------------*/
/* --- Start ductile viscosity calculation -------------------------------------------*/
/* Inverted value of newtonian NU set */
nunewt=0;
/* Inverted value of power-low NU set */
nupowl=0;
/* Check for the presence of ductile rheology */
// For more viscosity options, see codes of version 1
if (marknu[mm2])
{
/* A) Simple Newtonian rheology */
// - used in laboratory model of van Dinther et al., JGR, 2013a -
/* Newtonian creep: SSii=NU0*2.0*EEii */
/* Effective viscosity: NU=NU0 */
/* Effective viscosity member in Stoks: NUs=NU */
if(markdh[mm2]==0 && markdv[mm2]==0 && (markss[mm2]==0 || markmm[mm2]==1.0))
{
/* Inverted value of newtonian NU calc */
nunewt=1.0/marknu[mm2];
}
/* --> D) P-T-stress dependent rheology without/with brittle/ductile transition <--*/
// - used in large-scale models PhD thesis van Dinther -
/* Reological equations */
/* Stress>SScr */
/* Power law dislocation creep: SSii={NU0*EEii*exp[(E+PV)/RT]}^(1/n) */
/* Effective viscosity: NU=1/2*{NU0*exp[(E+PV)/RT]}^(1/n)*EEii^[(1-n)/n] */
/* Effective viscosity member in Stoks: NUs=NU/n */
/* Stress<SScr */
/* Newtonian diffusion creep: SSii=NU1*EEii*exp[(E+PV)/RT] */
/* Effective viscosity: NU=NU0/2*exp[(E+PV)/RT] */
/* Effective viscosity member in Stoks: NUs=NU */
/* NU1=NU0/SScr^(n-1) */
if(marknu[mm2]>0 && (markdh[mm2]!=0 || markdv[mm2]!=0) && markss[mm2]!=0 && markmm[mm2]!=1.0)
{
// ---> 2. Calculate ductile viscosity <---
/* T-P exponent for effective NU calc (clamped at 150 to avoid exp overflow) */
e1=(markdh[mm2]+markdv[mm2]*mpb)/rt;
if(e1>150.0) e1=150.0;
e1=exp(e1);
/* Koef for stress independent creep NU1 calc */
k1=marknu[mm2]/pow(markss[mm2],markmm[mm2]-1.0);
/* Inverted value of newtonian NU calc for diffusion creep */
nunewt=1.0/(0.5*k1*e1);
mnu2=nunewt;
/* Effective viscosity1 calc */
siginnew1=siginnew=sigin;
nupowl=0;
// Calculate dislocation creep viscosity
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
//Take arithmetic average of dislocation and diffusionc creep for effective ductile viscosity
mnu0=1.0/(mnu1+mnu2);
// ---> 3. Include elastic part for estimation future viscoelastic stresses <---
// Calculate visco-elasticity factor
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
// Calculate viscoelastic stress
siginnew2=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
dsiginnew1=siginnew2-siginnew1;
/* Effective viscosity2 calc */
// See above for description. Repeated here
siginnew=siginnew2;
nupowl=0;
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
mnu0=1.0/(mnu1+mnu2);
// Calculate visco-elasticity factor
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
// Calculate viscoelastic stress
siginnew=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
dsiginnew2=siginnew-siginnew2;
/* ---> 4. Local iterations for dislocation viscosity calculation by Bisection method <--- */
ncount=0;
// Locally iterate over nupowl and siginnew until siginnew-siginnew0<10 Pa (or 100 iterations)
do
{
// Check to prevent num issue when stress is not changing: only not true and in if sigma_2_1 = sigma_1_1 = sigma_0 : almost never no stress change?
dsiginnew0=ABSV(dsiginnew1)+ABSV(dsiginnew2);
if(dsiginnew0>0)
{
// Weigth factor: 0.5 = midpoint
dsiginnew0=0.5;
// Calculate midpoint = new estimate stress
siginnew0=siginnew=siginnew1*(1.0-dsiginnew0)+siginnew2*dsiginnew0;
// Update viscosity with that new stress
nupowl=0;
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
mnu0=1.0/(mnu1+mnu2);
// Update stress estimate with new viscosity
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
siginnew=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
// Calculate difference new and last stress estimate -> converging?
dsiginnew0=siginnew-siginnew0;
// Use this newest estimate for stress change to see if in same direction
// If yes; keep going in that direction; leave oldest estimate behind
if((dsiginnew0>=0 && dsiginnew1>=0) || (dsiginnew0<0 && dsiginnew1<0))
{
siginnew1=siginnew0;
dsiginnew1=dsiginnew0;
}
// If in opposite direction; passed optimal stress so turn back; leave newest 1 estimate behind
else
{
siginnew2=siginnew0;
dsiginnew2=dsiginnew0;
}
}
ncount++;
}
while(ABSV(dsiginnew0)>10.0 && ncount<101);
}
}
/* --- End Ductile viscosity calculation -------------------------------------------*/
// Check ductile effective viscosity calculation
// (harmonic combination of diffusion and dislocation creep)
nueff = 1.0 / (nunewt + nupowl);
// Add Peierl's creep viscosity limiter
if (peierls_on && nupeierls < nueff) nueff = nupeierls;
/* Mantle viscosity */
#if setup > 9
/* Early-time floor on mantle viscosity (first ~10 kyr) */
if((Markt==9 || Markt==10) && timesum<3.15576e+7*1e+4 && nueff<1e+20) nueff=1e+20;
#endif
/* Clip to global and rock-type viscosity limits */
if (nueff <nubeg) { nueff = nubeg; }
if (nueff > nuend) { nueff = nuend; }
if (nueff <markn0[mm2]) { nueff = markn0[mm2]; }
if (nueff > markn1[mm2]) { nueff = markn1[mm2]; }
nuduct=nueff;
*mdi0=0;
/* ------------------ Calculate viscoplastic viscosity ---------------------------- */
// Calculate brittle strength - sbrit -
// Plasticity switched off when both terms in the yield strength formulation are 0
if(((1-markll[mm2])*markb0[mm2] || abrit) && epsin)
{
// --- Strong slip velocity dependency of friction coefficient ---
// After Burridge and Knopoff (1967), Ampuero and Ben-Zion (2008), etc.
// Adapt friction parameters based on x-location (lab) or temperature (large-scale)
#if setup==10
if (mm2==7 && cmx>(700e3-shift_km) && cmx<(1150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup==11
// Including off-megathrust rate weakening
if (cmx>(700e3-shift_km) && cmx<(1150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup==12
// Collisional setup - L. Dal Zilio
if (cmx>(1700e3-shift_km) && cmx<(2150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup < 10
if (mm2==5 && veldepfric==1)
#endif
{
// Calculate Relative amount of Velocity-Weakening vs Velocity-Strengthening
#if setup>9
// Velocity-strengthening region
if (Markk<=tk_updipsez0) // && mm2==7)
{ relvw = 0; }
// Transitions to seismogenic zone: updip
else if (Markk>tk_updipsez0 && Markk<tk_updipsez1) // && mm2==7)
{ relvw = (Markk-tk_updipsez0)/(tk_updipsez1-tk_updipsez0); }
// Velocity-weakening for Seismogenic Zone (and off-megathrust region)
else { relvw = 1;}
// Note for the off-events setup there is no strengthening outside the subduction channel of basaltic crust
#elif setup < 10
// Change mm2 locally in this viscalc-routine, so that also for viscosity, shear modulus, Pf/Ps etc use this
// Seismogenic Zone = velocity-weakening
if (cmx >= end_updip && cmx <= start_downdip) { relvw = 1; mm2 = 6; }
// Transitions to seismogenic zone: updip
else if (cmx >= start_updip && cmx <= end_updip) { relvw = (cmx-start_updip)/(2*half_range);}
// Transitions away from seismogenic zone: downdip
else if (cmx >= start_downdip && cmx <= end_downdip) { relvw = 1 - ( cmx-start_downdip )/( 2*half_range );}
// Velocity-strenghtening region
else { relvw = 0; }
#endif
// Calculate slip-rate dependent change of coefficients
mvslip[mm1] = 2.0*epsin*res_high;
dvw = (1-mgamma_vw)+mgamma_vw/(1.0+mvslip[mm1]/mvc_vw);
dvs = (1-mgamma_vs)+mgamma_vs/(1.0+mvslip[mm1]/mvc_vs);
// Change friction coefficient accordingly
bbrit = mus_vs*dvs + relvw*(markb0[mm2]*dvw-mus_vs*dvs);
// Change cohesion as a function of slip velocity, if desired (if marka0[5]=~marka0[6])
#if setup>9
abrit = marka0[mm2];
#elif setup < 10
abrit = marka0[5] + relvw*(marka0[6]-marka0[5]);
#endif
// Iterate locally to obtain stable estimate of slip-rate
sbrit=abrit+bbrit*(1-lamb)*Markp;
if(sbrit>0 && Markv>0)
{
for(ncount=0;ncount<5;ncount++)
{
if(sbrit>0)
{
// epsin = 0.5* sbrit/eta in viscous formulation
mvslip[mm1] = sbrit/Markv*res_high;
dvw = (1-mgamma_vw)+mgamma_vw/(1.0+mvslip[mm1]/mvc_vw);
dvs = (1-mgamma_vs)+mgamma_vs/(1.0+mvslip[mm1]/mvc_vs);
bbrit = mus_vs*dvs + relvw*(markb0[mm2]*dvw-mus_vs*dvs);
sbrit=abrit+bbrit*(1-lamb)*Markp;
}
else
{
fprintf(fp_log,"LOOK: Sbrit is <= 0 within v-w loop: %e, abrit = %e, bbrit = %e, pr = %e, markvis = %e, x = %e, y = %e \n",sbrit,abrit,bbrit,Markp,Markv,cmx,cmy); fflush(fp_log);
}
}
}
// Calculate average value stresses and strainrates seismogenic zone
// But here do not have proper stress and vel(e) yet ! Only yield strength ..
#if setup < 10
if (relvw==1 && cmy<=gy[n_glayer+1])
{
sbrit_ave = sbrit_ave + (abrit + bbrit*(1-lamb)*Markp);
count_sezm = count_sezm + 1;
}
#endif
}
// In case of no rate dependency also calculate yield strength
else
{
sbrit=abrit+bbrit*(1-lamb)*Markp;
}
// Check strength values
if(sbrit<0) sbrit=0;
if(sbrit>marks1[mm2]) sbrit=marks1[mm2];
// Save frictional properties to file for analyses
// Save time and space by only doing at last timestep in prn output cycle
#if setup > 9
if (n0==1 && start_cond==1 && relvw<=1.0)
{
fprintf(flfric," %d %e %e %e %e %e %e %e %e \n", mm2, cmx, cmy, Markp, sbrit, Markxx[mm1], Markxy[mm1],lamb,bbrit);
}
#endif
// Store yield stress for post-processing and interpolation to nodes
msbrit[mm1] = sbrit;
// Store old-stress
msii_old[mm1] = sigin;
/* ---> 5. ! Viscoelastic case ! <--- */
if(stoksmod && timestepe && epsin)
{
/* Future plastic creep */
/* Future stresses calc */
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+nueff);
siginnew=2.0*nueff*epsin*xelvis+sigin*(1.0-xelvis);
// Plastic yielding if new estimate or stress of previous timestep exceeds strength
if(sbrit<siginnew || sbrit<sigin)
{
/* Executing plasticity by reseting stresses and viscosities */
// Note yn is defined at call to this viscalc routine
if(yn==1)
{
/* XXX Density correction for the dilation angle XXX */
// We do not use dilation ! Not for any rock type
if(markf0[mm2]>0 && markf1[mm2]>0)
{
/* Second invariant of viscoplastic strain calc, check */
e1=Marke[mm1]-sbrit/2.0/markgg[mm2];
/* Correction of divergence rate for plastic strain rate */
if(e1<markf1[mm2])
{
e1=epsin-sbrit/2.0/nuduct;
if(e1) *mdi0=2.0*e1*markf0[mm2]*timestepe;
}
}
/* ! Recompute stress ! So stress no longer exceed strength */
if(sigin && sbrit<sigin)
{
Markxx[mm1] *= sbrit/sigin;
Markxy[mm1] *= sbrit/sigin;
sigin=sbrit;
}
/* ! Recompute viscosity ! So decrease viscosity accordingly to localize deformation */
nubrit=sbrit/(2.0*epsin+(sigin-sbrit)/timestepe/markgg[mm2]);
if(nubrit<nueff) nueff=nubrit;
/* Set initial plastic strain */
if(Marke[mm1]<=0) Marke[mm1]=1e-20;
}
}
else
{
/* Not yielding: reset accumulated plastic strain (healing) */
if(yn==1) Marke[mm1]=0;
}
}
}
/* ------------------ End calculation viscoplastic viscosity ---------------------------- */
/* Check calculated viscosity to be within hard code minimum and maximum */
if (nueff < nubeg) { nueff = nubeg; }
if (nueff > nuend) { nueff = nuend; }
if (nueff < markn0[mm2]) { nueff = markn0[mm2]; }
if (nueff > markn1[mm2]) { nueff = markn1[mm2]; }
// Pass final viscosity back to main model
*mnu = nueff;
}

/* Number of nearest left vertical line find */
/* Binary search over the (monotonic) gx[] node coordinates; returns the
   index m10 of the grid column with gx[m10] <= cmx < gx[m10+1],
   clamped to xnumx-2 */
long int m1serch(double cmx)
/* cmx - X coordinate */
{
/* Variables */
long int m1,m10=0,m11=xnumx-1;
/* Search cycle (bisection) */
do
{
m1=(m10+m11)/2;
if (gx[m1]>cmx) m11=m1; else m10=m1;
}
while((m11-m10)>1);
if(m10>xnumx-2) m10=xnumx-2;
return m10;
}
/* Number of nearest left vertical line find */

/* Number of nearest upper horizontal line find */
/* Binary search over the (monotonic) gy[] node coordinates; returns the
   index m20 of the grid row with gy[m20] <= cmy < gy[m20+1],
   clamped to ynumy-2 */
long int m2serch(double cmy)
/* cmy - Y coordinate */
{
/* Variables */
long int m2,m20=0,m21=ynumy-1;
/* Search cycle (bisection) */
do
{
m2=(m20+m21)/2;
if (gy[m2]>cmy) m21=m2; else m20=m2;
}
while((m21-m20)>1);
if(m20>ynumy-2) m20=ynumy-2;
return m20;
}
/* Number of nearest upper horizontal line find */

/* Erosion/Sedimentation Function for markers */
/* mardy - marker vertical size, m */
void erosmarkomp(long int mm1, int yn, long int m10, double x, double y, char markt[], double marke[], double markd[])
/* mm1 - marker number */
/* yn - current sedimnts type 2,3 */
/* m1 - Up Left Node X,Y Num */
{
/* Variables */
double e,e0;
/* Surface level elevation definition */
/* Relative normalized coord calc */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
/* Surface level elevation for marker definition */
e0=(e*ep[m10+1]+(1.0-e)*ep[m10]);
/* Marker surface elevation definition */
if(markt[mm1]<2)
{
/* Water/Air -> Sediments conversion: marker sank below the surface */
if(y>e0)
{markt[mm1]=yn; marke[mm1]=0; markd[mm1]=-1.0;}
}
if(markt[mm1]>1)
{
/* Rock->Water/Air conversion: marker rose above the surface (eroded) */
if(y<e0) {markt[mm1]=0; marke[mm1]=0; markd[mm1]=-1.0;}
}
}
/* OMP End Erosion/Sedimentation Function for markers */

/* OMP Rock to rock+melt transformation */
/* Switches marker types between solid (type<20) and molten (type+20)
   variants according to the computed melt fraction */
void meltingomp(double mtk, double mpb, long int mm1, int mm2, char Markt[], double Marke[], double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* mm1 - mark number */
/* mm2 - mark type */
/* mxmelt - OUT: melt fraction */
/* mhlatent - OUT: latent heat, J/kg */
{
/* Melting related change of the marker type */
/* Check marker type: clamp negative pressure for all meltable types */
if (mm2==3 || mm2==4 || mm2==5 || mm2==6 || mm2==7 || mm2==8 || mm2==11 || mm2==16 || mm2==23 || mm2==24 || mm2==25 || mm2==26 || mm2==27 || mm2==28 || mm2==34 || mm2==36 || mm2==37 || mm2==38) if (mpb<0) mpb=0;
switch(mm2)
{
/* Sediments, upper crust */
case 3:
case 4:
case 5:
case 17:
case 23:
case 24:
case 25:
case 26:
case 37:
/* Basalt, Gabbro */
case 7:
case 8:
case 16:
case 6:
case 18:
case 27:
case 28:
case 36:
case 38:
// mxmelt and mhlatent are already pointers to mem address, so you can enter them without &
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
/* Solid -> molten (+20) and molten -> solid (-20); reset plastic strain */
if(*mxmelt>0 && mm2<20) {Markt[mm1]+=20; Marke[mm1]=0;}
if(*mxmelt<=0 && mm2>20) {Markt[mm1]-=20; Marke[mm1]=0;}
return;
/* Hydrated Peridotite */
case 11:
case 34:
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
if(*mxmelt>0 && mm2==11) {Markt[mm1]=34; Marke[mm1]=0;}
/* NOTE(review): molten hydrated peridotite (34) solidifies to dry
   peridotite (14), not back to 11 — presumably water was consumed;
   confirm against the hydration routine */
if(*mxmelt<=0 && mm2==34) {Markt[mm1]=14; Marke[mm1]=0;}
return;
/* Others */
default:
return;
}
}
/* OMP End Rock to rock+melt transformation */

/* Melt fraction, density, viscosity, heat capacity calculation */
/* Computes effective material properties for partially molten markers
   (types 23..38) as melt-fraction-weighted mixtures of the molten (mm2)
   and solid (mm2-20) end-members, including mantle phase transitions */
void meltpartomp(double mtk, double mpb, double x, double y, long int mm1, int mm2, double *mro,double *mbb, double *maa, double *mnu, double *mcp, double *mkt, double *mgg, double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* x,y - XY location of point for Vx,Vy calc */
/* mm1 - mark number */
/* mm2 - mark type */
/* mro,mbb,maa - OUT: density, adiabatic term, compressibility */
/* mnu,mcp,mkt,mgg - OUT: viscosity, heat capacity, conductivity, shear modulus */
/* mxmelt,mhlatent - OUT: melt fraction, latent heat */
{
/* Val buffer */
double xmelt=0,ival,dmpb,dmtk,sduct,nueff,smin,smax,nmin,nmax,cpadd=0,vx0,vy0,pr0,sp0,ee0;
long int m1,m10,m20;
double Mnu,mdi0;
double p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in;
m10=m1serch(x);
/* Check marker type */
if (mm2==23 || mm2==24 || mm2==25 || mm2==26 || mm2==27 || mm2==28 || mm2==34 || mm2==36 || mm2==37 || mm2==38)
{
/* Calculate melt fraction */
// mxmelt and mhlatent are already pointers to mem address, so you can enter them without &
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
xmelt = *mxmelt;
/* Standard adiabatic term: al=bro/(1+bro*(Tk-298.15)) */
*mbb=(markbb[mm2]*xmelt+markbb[mm2-20]*(1.0-xmelt))/(1.0-(markbb[mm2]*xmelt+markbb[mm2-20]*(1.0-xmelt))*(mtk-298.15));
*maa=(markaa[mm2]*xmelt+markaa[mm2-20]*(1.0-xmelt))/(1.0+(markaa[mm2]*xmelt+markaa[mm2-20]*(1.0-xmelt))*(mpb-1.0)*1e-3);
/* Density */
/* Ro=ro0 */
if (densimod==0)
{
*mro=markro[mm2]*xmelt+markro[mm2-20]*(1.0-xmelt);
}
/* Ro=ro0*(1-bro*(TK-298.15))*(1+aro*(Pkbar-0.001)) */
else
{
/* ival = cumulative density multiplier from solid-state phase transitions */
ival=1.0;
/* ========================= */
/* Mantle phase transitions */
/* ========================= */
/* if(mm2>=29 && mm2<=34 && markex[mm1]>0) ival=1.0-0.04*markex[mm1]; */
/* Eclogitization, St, Pv transitions in oceanic crust */
if(mm2>=27 && mm2<=28)
{
/* Eclogitization Ito and Kennedy, 1971 */
/*basalt=>garnet granulite (Ga-In) transition*/
p_ga_in=-9222.0+mtk*14.0;
/*Not to have granulites at pressure lower than 2 kbar*/
if(p_ga_in<2000.0) p_ga_in=2000.0;
/*garnet granulite=>eclogite (Pl-Out) transition*/
p_pl_out=-1460.0+mtk*20.0;
/*Not to have eclogites at pressure lower than 12 kbar*/
if(p_pl_out<12000.0) p_pl_out=12000.0;
if(mpb>p_ga_in)
{
rokf=0;
if(mtk>teclmin)
{
if(mtk>teclmax)
{
rokf=0.16;
}
else
{
rokf=0.16*(mtk-teclmin)/(teclmax-teclmin);
}
}
if(mpb>=p_pl_out)
{
ival=1.0+rokf;
}
else
{
ival=(1.0+rokf*(mpb-p_ga_in)/(p_pl_out-p_ga_in));
}
}
/* Coe->St transition Gerya et al., 2004, PCM */
p_st_in=59100.0+mtk*22.6;
if(mpb>p_st_in) ival*=1.06;
/* Pv transition, Mishin et al., 2008 with slope from Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=354000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=352000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.08;
if(mpb>=p_sp_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Ol-Sp and Pv transitions in the mantle */
if(mm2>=29 && mm2<=34)
{
/* Ol-Sp transition, Katsura & Ito, 1989 */
/* Ol-out transition*/
p_ol_out=91000.0+mtk*27.0;
/* Sp-in transition*/
p_sp_in=66000.0+mtk*39.0;
/*Limit width of Sp-Ol transition to 2 kbar */
if(p_sp_in>p_ol_out-2000.0) p_sp_in=p_ol_out-2000.0;
if(mpb>p_sp_in)
{
rokf=0.06;
if(mpb>=p_ol_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_sp_in)/(p_ol_out-p_sp_in));
}
}
/* Pv transition, Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=304000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=302000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.11;
if(mpb>=p_sp_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Density calculation with corrections */
*mro=xmelt*markro[mm2]*(1.0-markbb[mm2]*(mtk-298.15))*(1.0+markaa[mm2]*(mpb-1.0)*1e-3)+(1.0-xmelt)*ival*markro[mm2-20]*(1.0-markbb[mm2-20]*(mtk-298.15))*(1.0+markaa[mm2-20]*(mpb-1.0)*1e-3);
}
/**/
/* Viscosity */
/* Effective NU calc check */
/* Little melt */
// Assume similar to no melt, since go into viscalc..
if(xmelt<0.1)
{
// QUESTION TARAS - why plastic reset here? (i switched yn=1 to yes wrt old version, but before was set to 0 here)
// while mm2 going in is mm2-20 ? And mm2>20 returns immediately; ok that put here mm2-20 ?
viscalcomp(mtk,mpb,markx[mm1],marky[mm1],markv[mm1],markwa[mm1],markk[mm1],markp[mm1],markt[mm1],markexx[mm1],markexy[mm1],markxx,markxy,marke,mm1,mm2-20,1,m10,&Mnu,&mdi0);
*mnu=Mnu;
*mgg=markgg[mm2-20];
}
/* Significant melt */
// Allowed to drop viscosity below minimum for rock type (init.t3c), but not below minimum for whole model (mode.t3c)
else
{
/* Set viscosity and stress limits */
nmin=MAXV(markn0[mm2],nubeg);
nmax=MINV(markn1[mm2],nuend);
smin=MAXV(marks0[mm2],strmin);
smax=MINV(marks1[mm2],strmax);
/* Calc effective strain rate after second strain rate tensor invariant EEii=(1/2SUM(EPSik^2))^(1/2) */
m20=m2serch(y);
allinteriomp(x,y,m10,m20,&vx0,&vy0,&pr0,&sp0,&ee0);
// ee0=pow(eps[6]*eps[6]+eps[4]*eps[4],0.5); (was epsin)
/* Effective NU calc check (melt-fraction dependent viscosity law) */
nueff=marknu[mm2]*exp(2.5+pow((1.0-xmelt)/xmelt,0.48)*(1.0-xmelt));
if(nueff<nmin) nueff=nmin;
if(nueff>nmax) nueff=nmax;
/* Ductile stress calc check */
sduct=nueff*2.0*ee0;
if(sduct<smin && ee0) {nueff=0.5*smin/ee0; sduct=smin;}
if(sduct>smax) {nueff=0.5*smax/ee0; sduct=smax;}
*mnu=nueff;
/* Shear modulus */
*mgg=markgg[mm2];
}
/* Heat capacity (melt-fraction weighted) */
*mcp=markcp[mm2]*xmelt+markcp[mm2-20]*(1.0-xmelt);
/* heat conductivity (T,P-dependent, melt-fraction weighted) */
*mkt=((markkt[mm2]+markkf[mm2]/(mtk+77.0))*exp(markkp[mm2]*mpb))*xmelt+((markkt[mm2-20]+markkf[mm2-20]/(mtk+77.0))*exp(markkp[mm2-20]*mpb))*(1.0-xmelt);
/* Additional melting adiabatic term, heat capacity */
if(xmelt>0 && xmelt<1.0)
{
/* Melting adiabatic term: alm=-ro*(dHlat/dP)/T */
/* Numerical differentiation (central difference in P) */
dmpb=mpb*0.001;
meltpart1omp(mtk,mpb-dmpb,mm2,mxmelt,mhlatent);
ival= *mhlatent;
meltpart1omp(mtk,mpb+dmpb,mm2,mxmelt,mhlatent);
ival-= *mhlatent;
ival *= *mro / (mtk*2.0*dmpb*1e+5);
*mbb+=ival;
/* Melting heat capacity term: cpm=dHlat/dT */
/* Numerical differentiation (central difference in T) */
dmtk=1.0;
meltpart1omp(mtk+dmtk,mpb,mm2,mxmelt,mhlatent);
ival= *mhlatent;
meltpart1omp(mtk-dmtk,mpb,mm2,mxmelt,mhlatent);
ival-= *mhlatent;
ival/=2.0*dmtk;
*mcp+=ival;
}
}
else
{
/* Not a partially molten type: zero all outputs */
*maa= *mbb= *mxmelt= *mhlatent= *mro= *mnu= *mcp= *mkt= 0;
}
}
/* End OMP Rock to rock+melt transformation */

/* Melt fraction, latent heat calculation */
/* Linear melt fraction between rock-type specific wet solidus (ts) and
   dry liquidus (tl); latent heat scales with melt fraction */
void meltpart1omp(double mtk, double mpb, int mm2, double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* mm2 - mark type */
/* mxmelt - OUT: melt fraction in [0,1] */
/* mhlatent - OUT: latent heat, J/kg */
{
/* Val buffer */
double xmelt=0,hlatent=0,ival;
long int m1;
/* ykm: approximate depth (km) from pressure (bar) */
double ykm=mpb*3e-3,ts=0,tl=0;
/* Calculate melt fraction using marker type */
if (ykm>0)
switch(mm2)
{
/* Sediments: latent heat 300 kJ/kg (Bittner & Schmeling, 1995) */
case 3:
case 4:
case 5:
case 17:
case 23:
case 24:
case 25:
case 37:
/* Wet Solidus Temperature, Johannes, 1985, Poli & Schmidt, 2002 */
if (ykm<36.0)
{
ts=889.0+536.6/(ykm+1.609)+18.21/(ykm+1.609)/(ykm+1.609);
}
else
{
ts=831.3+2.0*ykm;
}
/* Dry Granite Liquidus, Johannes, 1985 */
tl=1262.0+3.0*ykm;
hlatent=300000.0;
break;
/* Basalt, Gabbro: latent heat 380 kJ/kg (Bittner & Schmeling, 1995) */
case 7:
case 8:
case 16:
case 27:
case 28:
case 36:
case 6:
case 18:
case 26:
case 38:
/* Wet solidus, Schmidt & Poli, 1998 */
if (ykm<48.0)
{
ts=972.6-2111.0/(ykm+10.63)+70033.0/(ykm+10.63)/(ykm+10.63);
}
else
{
ts=935.4+0.1162*ykm+0.006937*ykm*ykm;
}
/* Dry Toleitic Basalt Liquidus, Hess, 1989 */
tl=1423.15+3.5*ykm;
hlatent=380000.0;
break;
/* Peridotite: latent heat 400 kJ/kg Turcotte & Schubert, 1982, p.171 */
case 11:
case 34:
/* Wet solidus, Schmidt & Poli, 1998 */
if (ykm<72.0)
{
ts=1239.8+1493.0/(ykm+9.701);
}
else
{
ts=1266.3-0.3948*ykm+0.003893*ykm*ykm;
}
/* Dry Peridotite Liquidus, Hess, 1989 */
tl=2073.15+3.8*ykm;
hlatent=400000.0;
break;
/* Other rocks - No melting */
default:
break;
}
/* Melt fraction, latent heat calculation */
*mxmelt = *mhlatent = 0;
if(tl)
{
/* Melt fraction calc, check (linear between solidus and liquidus, clipped to [0,1]) */
xmelt=(mtk-ts)/(tl-ts);
if(xmelt<0) xmelt=0;
if(xmelt>1.0) xmelt=1.0;
*mxmelt = xmelt;
/* Latent heat calc */
hlatent *= xmelt;
*mhlatent=hlatent;
}
}
/* End OMP Melt fraction, latent heat calculation */

/* Hydration front progress after H2O budget */
/* Tracks water release/consumption by markers, generates fluid markers
   (type = rock type + 50) where water is released, and records the
   spatial extent of hydration (sol0/sol1) and melting (fre0) per cell */
double hydration2omp()
{
/* Val buffer */
double ysurf,vfiltr,yfiltr,dydx,dydx1,sy1,sy2,sy3,sy4,sy5,e1,mwamin,x0,y0,x1,y1,vx1,vy1;
double hytimesum,hytimesum0;
/* TD Database variables */
double W0,W1,W2,W3,R0,R1,R2,R3,n,e,dx,dy;
double mtk,mpb,mwa,mro,dmwa,wro;
double Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt;
long int m1,m2,m3,mm1,marknum1=marknum;
int mm2,mm3,n1,n2;
fprintf(fp_log,"\n WATER Transport BEGIN \n");fflush(fp_log);
/* Marker steps */
dx=dxwater;
dy=dywater;
/* Min water contents in the hydraten mantle wt% */
mwamin=0.1;
/* Min Distance from erosion surface for water release */
ysurf=8000.0;
/* Clear wa[] wt and reset per-cell hydration/melting extent boxes:
   sol0/sol1[nodenum..] and fre0[..] hold min/max X,Y bounds, initialised
   to +/-1e+30 sentinels */
for (m1=0;m1<nodenum;m1++)
{
wa0[m1]=0;
wa1[m1]=0;
sol0[m1]=0;
sol1[m1]=0;
sol0[nodenum+m1]=1e+30;
sol1[nodenum+m1]=-1e+30;
sol0[nodenum2+m1]=1e+30;
sol1[nodenum2+m1]=-1e+30;
fre0[ m1]=1e+30;
fre0[nodenum +m1]=-1e+30;
fre0[nodenum2+m1]=1e+30;
fre0[nodenum3+m1]=-1e+30;
}
/* Fluid marker generation cycle */
double start=omp_get_wtime();
for (mm1=0;mm1<marknum;mm1++)
{
// Reset fluid presence indicator for next marker for loop
markwa[mm1] = 0;
/* Marker type (strip the +100 immobile-marker offset) */
mm2=(int)markt[mm1];
if (mm2>=100) mm2-=100;
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Erosion surface */
e1=(markx[mm1]-gx[m1])/(gx[m1+1]-gx[m1]);
sy1=(e1*ep[m1+1]+(1.0-e1)*ep[m1]);
/* Check markers out of grid and within hydration range */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && (markk[mm1]>0 || markt[mm1]>=50) && markt[mm1]<100)
if((markd[mm1])>=0 && (markw[mm1])>=0 && mm2>1 && mm2!=9 && mm2!=10)
{
/* Rock markers (type<50); fluid markers handled in the else branch */
if(mm2<50)
{
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=(markk[mm1]);
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m1,markt);
/* Rocks to rock+melt transformation */
if (markt[mm1]>=20)
{
/* Check melting extent
*/
/* ---- (review) Tail of the marker H2O-budget routine; its head lies above this
   chunk. Review comments added; executable code unchanged. ---- */
/* Widen the per-cell melt-extent box fre0 (slabs: [0]=min-x,[nodenum]=max-x,
   [nodenum2]=min-y,[nodenum3]=max-y) to include this marker +/- one cell */
if(fre0[ +m3]>markx[mm1]-dx) fre0[ m3]=markx[mm1]-dx;
if(fre0[nodenum +m3]<markx[mm1]+dx) fre0[nodenum +m3]=markx[mm1]+dx;
if(fre0[nodenum2+m3]>marky[mm1]-dy) fre0[nodenum2+m3]=marky[mm1]-dy;
if(fre0[nodenum3+m3]<marky[mm1]+dy) fre0[nodenum3+m3]=marky[mm1]+dy;
}
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mro=Mro;
mwa=Mwa;
/* Water changes in kg/m3 calc (mwa = equilibrium water wt%, markw = current wt%) */
dmwa=mro*(mwa-markw[mm1])*1e-2;
//{fprintf(fp_log,"H2O MARKER %ld %d %d %e %e %e %e %e %e %e",mm1,mm2,mm3,mtk-273.15,mpb/1000.0,mwa,mro,markw[mm1],markd[mm1],dmwa);getchar();}
//{fprintf(fp_log,"H2O RELEASE %ld %d %d %e %e %e %e %e %e %e",mm1,mm2,mm3,mtk-273.15,mpb/1000.0,mwa,mro,markw[mm1],markd[mm1],dmwa);getchar();}
/* Add water changes to the current cell, kg/m3 */
/* Water release */
if ((markw[mm1]-mwa)>dmwamin)
{
/* Save new water content */
markw[mm1]=mwa;
/* Generation of fluid marker (NO FLUID From melts */
if (markt[mm1]<20 && marky[mm1]>sy1)
{
markt[marknum1]=markt[mm1]+50;
markx[marknum1]=markx[mm1];
marky[marknum1]=marky[mm1];
markk[marknum1]=markk[mm1];
markd[marknum1]=1050.0;
markw[marknum1]=-dmwa;
/* Add aditional markers counter */
marknum1++;
// If new marker is interesting for picking algorithm, flag to follow.
// Note is hard-coded in i2.c as well.
// Only here excluded fluid markers, since immobile can not become fluid.
// NOTE(review): marknum1 was ALREADY incremented above, so the condition and
// follow[marknum1]=2 below read/flag the NEXT (not yet created) marker slot,
// not the fluid marker just generated — looks like an off-by-one; confirm
// whether marknum1-1 was intended.
if ( start_cond==1 && marky[marknum1]<85e3 && markx[marknum1]>gx[m10_hr] && markx[marknum1]<gx[m11_hr] && markt[marknum1]>49 && markt[marknum1]<100)
{
follow[marknum1]=2;
nmf++;
}
/* Check hydration extent (sol0/sol1 slabs hold the hydrated-region bounding box) */
if(sol0[nodenum+m3]>markx[mm1]-dx) sol0[nodenum+m3]=markx[mm1]-dx;
if(sol1[nodenum+m3]<markx[mm1]+dx) sol1[nodenum+m3]=markx[mm1]+dx;
if(sol0[nodenum2+m3]>marky[mm1]-dy) sol0[nodenum2+m3]=marky[mm1]-dy;
if(sol1[nodenum2+m3]<marky[mm1]+dy) sol1[nodenum2+m3]=marky[mm1]+dy;
}
}
else
/* Water consuming */
{
if(dmwa>0)
{
wa1[m3]+=dmwa;
sol1[m3]+=1.0;
}
}
}
else
/* Fluid marker count */
{
/* Check position */
if(marky[mm1]>sy1)
{
/* Check hydration extent */
if(sol0[nodenum+m3]>markx[mm1]-dx) sol0[nodenum+m3]=markx[mm1]-dx;
if(sol1[nodenum+m3]<markx[mm1]+dx) sol1[nodenum+m3]=markx[mm1]+dx;
if(sol0[nodenum2+m3]>marky[mm1]-dy) sol0[nodenum2+m3]=marky[mm1]-dy;
if(sol1[nodenum2+m3]<marky[mm1]+dy) sol1[nodenum2+m3]=marky[mm1]+dy;
}
else
/* Erase fluid marker (markx<0 marks it dead; compacted at end of routine) */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
}
/* Rock hydration cycle: rocks get hydrated by changing marker type mm2 */
start=omp_get_wtime();
for (mm1=0;mm1<marknum;mm1++)
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markt[mm1]<50)
{
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Check markers within hydration range */
if(markx[mm1]>sol0[nodenum+m3] && marky[mm1]>sol0[nodenum2+m3] && (markx[mm1])<sol1[nodenum+m3] && (marky[mm1])<sol1[nodenum2+m3])
{
/* Fluid presence mark */
markwa[mm1]=1;
if(markt[mm1]==9 || markt[mm1]==10 || markt[mm1]==12 || markt[mm1]==14 || markt[mm1]==5 || markt[mm1]==6)
{
/* Mantle Hydration: dry peridotite (9/10/12/14) -> wet (11);
   dry crust (5/6) -> wet crust (17/18) */
if (markt[mm1]!=5 && markt[mm1]!=6)
{
mm2=markt[mm1]=11;
}
else
{
mm2=markt[mm1]=markt[mm1]+12;
}
/* P, T parameters calc (pressure interpolated in Pa, converted to bar) */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=(markk[mm1]);
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m1,markt);
/* Rocks to rock+melt transformation */
if (markt[mm1]>=20)
{
/* Check melting extent */
if(fre0[ +m3]>markx[mm1]-dx) fre0[ m3]=markx[mm1]-dx;
if(fre0[nodenum +m3]<markx[mm1]+dx) fre0[nodenum +m3]=markx[mm1]+dx;
if(fre0[nodenum2+m3]>marky[mm1]-dy) fre0[nodenum2+m3]=marky[mm1]-dy;
if(fre0[nodenum3+m3]<marky[mm1]+dy) fre0[nodenum3+m3]=marky[mm1]+dy;
}
/* Thermodynamic database use for Ro as function of Water content */
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mro=Mro;
mwa=Mwa;
/* Water changes in kg/m3 calc */
dmwa=mro*(mwa-markw[mm1])*1e-2;
/* Add water changes to the current cell, kg/m3 */
/* Water consuming */
if (dmwa>0)
{
wa1[m3]+=dmwa;
sol1[m3]+=1.0;
}
}
}
}
/* Fluid marker computing cycle: accumulate available free water per cell */
start=omp_get_wtime();
for (mm1=0;mm1<marknum1;mm1++)
{
/* Check markers out of grid and within hydration range */
if(markt[mm1]>=50 && markt[mm1]<100 && markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize)
{
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Erosion surface (linear interpolation of ep[] at the marker x) */
e1=(markx[mm1]-gx[m1])/(gx[m1+1]-gx[m1]);
sy1=(e1*ep[m1+1]+(1.0-e1)*ep[m1]);
/* Water in melt region conversion */
if(markd[mm1]<1100.0 && markx[mm1]>fre0[m3] && marky[mm1]>fre0[nodenum2+m3] && markx[mm1]<fre0[nodenum+m3] && marky[mm1]<fre0[nodenum3+m3]) markd[mm1]=1150.0;
/* Check position, no fluid above erosion/sedimentation level, no fluid passing through the melt */
if(marky[mm1]>sy1 && marky[mm1]<zdeep && (markd[mm1]<1100.0 || (markx[mm1]>fre0[m3] && marky[mm1]>fre0[nodenum2+m3] && markx[mm1]<fre0[nodenum+m3] && marky[mm1]<fre0[nodenum3+m3])))
{
wa0[m3]+=markw[mm1];
sol0[m3]+=1.0;
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
if (printmod==10000) fprintf(fp_log,"\n Time taken for fluid computing cycle = %e s \n",omp_get_wtime()-start);
/* Fluid marker consuming cycle: balance per-cell water demand (wa1) against supply (wa0) */
start=omp_get_wtime();
for (mm1=0;mm1<marknum1;mm1++)
{
/* Marker type */
mm2=(int)markt[mm1];
if (mm2>=100) mm2-=100;
// What use? since will not use mm1>100 anyway..
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Change water consuming rocks and fluid makers */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && (markk[mm1]>0 || markt[mm1]>=50) && markt[mm1]<100)
if((markd[mm1])>=0 && (markw[mm1])>=0 && mm2>1 && mm2!=9 && mm2!=10 && mm2!=12 && mm2!=14 && mm2!=5 && mm2!=6)
{
// For all assimilating rock types: 0-50, except those one line above
if(mm2<50)
{
/* P, T parameters calc */
// Why need to do this every time again?
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=markk[mm1];
/* Thermodynamic database use for Ro, Water */
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mwa=Mwa;
mro=Mro;
/* Water change */
dmwa=mwa-markw[mm1];
/* Add water changes to the current cell, kg/m3 */
/* Water consuming */
if(dmwa>0)
{
if (wa1[m3]<=wa0[m3])
{
/* Save complete new water content (cell supply covers full demand) */
markw[mm1]=mwa;
}
else
{
/* COmpute, Save partial new water content (demand exceeds supply: scale by wa0/wa1) */
markw[mm1]=markw[mm1]+dmwa*wa0[m3]/wa1[m3];
}
}
}
// For all fluid markers: 50-100
else
/* Fluid marker change */
{
// Evaluate wether all free water is finished
if(wa1[m3]<wa0[m3])
{
/* Count water changes for fluid marker */
markw[mm1]*=1.0-wa1[m3]/wa0[m3];
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
}
/* Reset aditional markers: compact the marker array by copying live markers
   from the tail over markers flagged erased (markx<0 / out of grid) */
fprintf(fp_log,"\n WATER BEG Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1);
fflush(fp_log);
mm1=0;
while(marknum1>marknum && mm1<marknum)
{
/* Reload marker */
if((markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize) && markt[mm1]<100)
{
/* Decrease aditional markers counter */
marknum1--;
// NOTE(review): stray ';' terminates the if() below, making the guard a
// no-op — the copy block ALWAYS executes, even when the tail marker at
// marknum1 is itself erased (markx<0). Probable bug; confirm intent.
if(markx[marknum1]>=0);
{
/* Type save */
markt[mm1]=markt[marknum1];
/* X,Y, water reload */
markx[mm1]=markx[marknum1];
marky[mm1]=marky[marknum1];
markw[mm1]=markw[marknum1];
markd[mm1]=markd[marknum1];
markk[mm1]=markk[marknum1];
}
}
/* Increase markers counter */
mm1++;
}
fprintf(fp_log,"\n WATER END Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1);
fflush(fp_log);
/* Set new marker number */
marknum=marknum1;
return 0;
}
/* End OMP Hydration front progress after H2O budget */
/* Erosion Surface progress */
/* Advances the erosion/sedimentation surface ep[0..xnumx-1] and the passive
   "initial surface" ep[xnumx..2*xnumx-1] over one global timestep, using
   upwind advection with internal CFL-limited sub-stepping.  Uses the global
   'timestep' as scratch for the sub-step and restores it on exit. */
void erosion()
{
/* Val buffer */
double v0,v1,dydx,x1,vx1,vy1,dy;
double ertimesum,ertimesum0;
long int m1,m2;
/**/
/* Erosion Solution Cycle ------------------------------------------ */
ertimesum=0;
ertimesum0=timestep;
do
{
/* Save old cycle results */
for (m1=0;m1<xnumx;m1++)
{
ep0[m1]=ep[m1];
ep0[xnumx+m1]=ep[xnumx+m1];
}
/**/
/**/
/**/
/* Initial timestep definition */
timestep=ertimesum0-ertimesum;
/**/
/**/
/**/
/* Erosion timestep definition using material velosity field */
for (m1=0;m1<xnumx;m1++)
{
/* Calc horisontal Coordinate */
x1=gx[m1];
/**/
/* EROSION SURFACE */
/* Calc material velocity on the Surface using velosity field
   (allinteri leaves vx in eps[11], vy in eps[12]) */
allinteri(x1,ep0[m1]);
vx1=eps[11];
vy1=eps[12];
/* Check horizontal timestep */
/* Calc x derivative of y position of the Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
timestep=MINV(timestep,(gx[m1]-gx[m1-1])/vx1);
/* fprintf(fp_log,"111 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1-1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar(); */
}
if(vx1<0 && m1<xnumx-1)
{
timestep=MINV(timestep,(gx[m1]-gx[m1+1])/vx1);
/* fprintf(fp_log,"222 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1+1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar(); */
}
/* Check vertical timestep */
if(vy1)
{
/* Horizontal line num definition */
m2=m2serch(ep0[m1]);
/* Check timestep */
timestep=MINV(timestep,(gy[m2+1]-gy[m2])/ABSV(vy1));
/* fprintf(fp_log,"333 %ld %e %e %e %e %e %e
%e",m2,vx1,vy1,(gy[m2+1]-gy[m2])/ABSV(vy1),ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar(); */
}
/**/
/**/
/* INITIAL SURFACE */
/* Calc material velocity on the Initial Surface using velosity field */
allinteri(x1,ep0[xnumx+m1]);
vx1=eps[11];
vy1=eps[12];
/* Check horizontal timestep */
/* Calc x derivative of y position of the Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
timestep=MINV(timestep,(gx[m1]-gx[m1-1])/vx1);
/* fprintf(fp_log,"444 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1-1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar(); */
}
if(vx1<0 && m1<xnumx-1)
{
timestep=MINV(timestep,(gx[m1]-gx[m1+1])/vx1);
/* fprintf(fp_log,"555 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1+1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar(); */
}
/* Check vertical timestep */
if(vy1)
{
/* Horizontal line num definition */
m2=m2serch(ep0[xnumx+m1]);
/* Check timestep */
timestep=MINV(timestep,(gy[m2+1]-gy[m2])/ABSV(vy1));
/* fprintf(fp_log,"666 %ld %e %e %e %e %e %e %e",m2,vx1,vy1,(gy[m2+1]-gy[m2])/ABSV(vy1),ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar(); */
}
}
/* fprintf(fp_log,"777 %e %e %e %e",ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar(); */
/**/
/**/
/**/
/* Displace Surface boundary */
/* for (m1=1;m1<xnumx-1;m1++) */
for (m1=0;m1<xnumx;m1++)
{
/* EROSION SURFACE */
/* Calculation of errosion rate (linear above eroslev) */
v0=0;
if(ep0[m1]<eroslev)
{
v0=eroscon+eroskoe*(eroslev-ep0[m1]);
}
/* Calculation of sedimentation rate (linear below sedilev) */
v1=0;
if(ep0[m1]>sedilev)
{
v1=sedicon+sedikoe*(ep0[m1]-sedilev);
}
/* Calc horisontal Coordinate */
x1=gx[m1];
/**/
/* Calc material velocity on the Surface using velosity field */
allinteri(x1,ep0[m1]);
vx1=eps[11];
vy1=eps[12];
/**/
/* Erase erosion/sedimentation rate for marginal points */
if((m1==0 && vx1>0) || (m1==xnumx-1 && vx1<0)) v0=v1=0;
/**/
/* Calc x derivative of y position of the Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
dydx=(ep0[m1]-ep0[m1-1])/(gx[m1]-gx[m1-1]);
/* fprintf(fp_log,"AAA %e %e",ep0[m1],dydx);getchar(); */
}
if(vx1<0 && m1<xnumx-1)
{
dydx=(ep0[m1+1]-ep0[m1])/(gx[m1+1]-gx[m1]);
/* fprintf(fp_log,"BBB %e %e",ep0[m1],dydx);getchar(); */
}
/* Recalc new Surface position: advection term (vy - dydx*vx) plus erosion v0
   minus sedimentation v1 (y axis points down, so erosion deepens ep) */
ep[m1]+=timestep*(v0-v1+vy1-dydx*vx1);
/* fprintf(fp_log,"SURFACE %ld %e %e %e %e %e %e %e %e",m1,x1,v0,v1,vx1,vy1,dydx,ep[m1]);getchar(); */
/**/
/**/
/**/
/* INITIAL SURFACE */
/* Initial surface displacement */
/* Calc material velocity on the Surface using velosity field */
allinteri(x1,ep0[xnumx+m1]);
vx1=eps[11];
vy1=eps[12];
/* Calc x derivative of y position of Initial Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
dydx=(ep0[xnumx+m1]-ep0[xnumx+m1-1])/(gx[m1]-gx[m1-1]);
/* fprintf(fp_log,"AAA %e ",dydx);getchar();
fprintf(fp_log,"AAA %e ",dydx);getchar(); */
}
if(vx1<0 && m1<xnumx-1)
{
dydx=(ep0[xnumx+m1+1]-ep0[xnumx+m1])/(gx[m1+1]-gx[m1]);
/* fprintf(fp_log,"BBB %e ",dydx);getchar(); */
}
/* Recalc new Initial Surface position (passive surface: pure advection, no
   erosion/sedimentation source terms) */
ep[xnumx+m1]+=timestep*(vy1-dydx*vx1);
/**/
}
/**/
/**/
/**/
/**/
/* Relax EROSION surface: diffuse neighbouring points toward the critical
   slope 'slopemax' (always enabled; "if (0==0)" is a debug toggle) */
if (0==0)
for (m1=0;m1<xnumx-1;m1++)
{
/* Calc x derivative of y position */
dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
/* Relax surface for critical slope */
if(dydx>slopemax)
{
dy=((ep[m1+1]-ep[m1])-slopemax*(gx[m1+1]-gx[m1]))/2.0;
ep[m1] +=dy;
ep[m1+1]-=dy;
/* dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
fprintf(fp_log,"AAA %ld %e %e",m1,slopemax,dydx);getchar(); */
}
if(dydx<-slopemax)
{
dy=((ep[m1+1]-ep[m1])+slopemax*(gx[m1+1]-gx[m1]))/2.0;
ep[m1] +=dy;
ep[m1+1]-=dy;
/* dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
fprintf(fp_log,"BBB %ld %e %e",m1,slopemax,dydx);getchar(); */
}
}
/**/
/**/
/**/
/* Add Erosion step */
ertimesum+=timestep;
/**/
/**/
/**/
/* Print Results */
if (printmod)
{
fprintf(fp_log,"\n EROSION STEP = %e YEARS EROSION TIME = %e YEARS \n",timestep/3.15576e+7,ertimesum/3.15576e+7);
fflush(fp_log);
}
}
while(ertimesum<ertimesum0);
/* Restore timestep */
timestep=ertimesum0;
}
/* Erosion Surface progress */
/* Thermodynamic database use for ro, Cp */
// Within a loop over all markers, do:
// Interpolation properties between four nearest points in thermodynamic database dep. on T,P,composition
// Inputs : (x,y) marker coordinates; mtk temperature [K]; mpb pressure [bar];
//          mm2 rock type; mm1 marker index (diagnostics only); m10 grid column.
// Outputs (via pointers): Mgg shear modulus, Mro density, Mwa equilibrium water
//          wt%, Mcp heat capacity, Mbb adiabatic term, Maa compressibility term,
//          Mdhh enthalpy difference, Mkt thermal conductivity.
// NOTE(review): *Mkt is deliberately NOT reset below (only the other outputs
// are): the shallow branch does "*Mkt+=krad", i.e. adds the radiative part to
// whatever the caller passed in — confirm callers pre-load *Mkt with the
// lattice conductivity.
void tdbasecalcomp(double x, double y, double mtk, double mpb, int mm2, long int mm1, long int m10, double *Mgg, double *Mro, double *Mwa, double *Mcp, double *Mbb, double *Maa, double *Mdhh, double *Mkt)
{
/* TD Database variables, dTK,dPB - TK, PB step for tabulation in TD database */
double H0,H1,H2,H3,R0,R1,R2,R3,G0,G1,G2,G3,W0,W1,W2,W3,n,e;
/* Val Buffers */
int n1,n2,mm3,ynpb;
double mhh0,mhh1,mdhh,maa,mwa,dmwa,wro,mro,mcp,mbb,mgg,mkt,mkt1,pbmax,xold,kr01,kr1,kr10,xkr,krad;
long int m1=m10;
double sy1,e1;
/* Maximal pressure for the shallow database */
pbmax=pbmin+pbstp*(double)(pbnum-1);
/* Adiabate computing */
ynpb=0;
/* (review) dead branch: leading "1==0" disables this adiabat ramp toggle */
if(1==0 && timesum<3.15576e+7*1e+3) {fprintf(fp_log,"in adiabate: can not right ? \n"); fflush(fp_log); mpb*=timesum/(3.15576e+7*1e+3); ynpb=1;}
/* Reset TD variables */
*Mgg=*Mro=*Mwa=*Mcp=*Mbb=*Maa=0;
/* Thermal conductivity */
/* m895 Dry peridotite Fe=12 */
/* Olivine: Hoffmeister, 1999; Hoffmeister & Yuen, 2005 */
if(mpb<235000.0)
{
/* Lattice k */
mkt1=(1.878+770.9/MINV(mtk,1200.0))*(1.0+4.26e-6*mpb);
/* Radiative k 0.1 mm */
kr01=pow(mtk/4000.0,3.0);
/* Radiative k 1 mm */
kr1=pow(mtk/1774.0,3.0);
/* Radiative k 10 mm */
xkr=pow(mtk/1636.0,10.0);
xkr/=xkr+1.0;
kr10=pow((mtk-1000.0*xkr)/1011.0,3.0)-0.7713*xkr;
}
/* Perovskite: Hoffmeister, 1999; Hoffmeister & Yuen, 2005 */
else
{
/* Lattice k */
mkt1=(1.291+1157.0/MINV(mtk,2100.0))*(1.0+2.50e-6*mpb);
/* Radiative k 0.1 mm */
kr01=pow(mtk/3591.0,3.0);
/* Radiative k 1 mm */
kr1=pow(mtk/2117.0,3.0);
/* Radiative k 10 mm */
xkr=pow(mtk/1500.0,4.0);
xkr/=xkr+1.0;
kr10=pow((mtk+4000.0*xkr)/5776.0,3.0)+2.822*xkr;
}
/* Use the 1 mm grain-size radiative conductivity (kr01/kr10 computed but unused here) */
krad=kr1;
/* Shallow TD base type */
if(mpb<pbmax && ynpb==0)
{
/* TD base type: map rock type mm2 to table index mm3 of td[][][][] */
switch (mm2)
{
/* Dry Upper crust */
case 5:
mm3=11;
break;
/* Wet Upper crust */
case 17:
mm3=12;
break;
/* Dry Lower crust */
case 6:
mm3=13;
break;
/* Wet Lower crust */
case 18:
mm3=14;
break;
/* Sediments */
case 2: case 3: case 4:
mm3=5;
break;
/* Molten Sediments */
case 37: case 25: case 22: case 23: case 24:
mm3=6;
break;
/* Basalt */
case 16: case 7:
mm3=7;
break;
/* Molten Basalt */
case 36: case 27:
mm3=8;
break;
/* Gabbro */
case 38: case 26: case 8:
mm3=3;
break;
/* Molten Gabbro */
case 28:
mm3=4;
break;
/* Dry peridotite */
case 9: case 12: case 14: case 10:
mm3=0;
break;
/* Wet peridotite */
case 13: case 11:
mm3=1;
break;
/* Molten peridotite */
case 34:
mm3=2;
break;
/* Unknown type */
default:
{fprintf(fp_log,"Shallow TD: Unknown rock type for TD database %d, for marker %ld with T= %f, P=%f \n",mm2,mm1,mtk,mpb); fflush(fp_log); exit(0);}
}
/* ABCD-4Cell Number */
// Get weights for nearest points in thermodynamic database:
// clamp T,P into table range, then split into integer cell (n1,n2) and
// fractional offsets (e,n) for bilinear interpolation
e=(mtk-tkmin)/tkstp;
if(e<0) e=0;
if(e>(double)(tknum-1)) e=(double)(tknum-1);
n=(mpb-pbmin)/pbstp;
if(n<0) n=0;
if(n>(double)(pbnum-1)) n=(double)(pbnum-1);
n1=(int)(e);
if(n1>tknum-2) n1=tknum-2;
n2=(int)(n);
if(n2>pbnum-2) n2=pbnum-2;
/* e,n Calc */
e=(e-(double)(n1));
n=(n-(double)(n2));
/* Ro H values at the 4 cell corners (td[][][][0]=density, [1]=enthalpy,
   [3]=Vs-like term used to build shear modulus, [4]=water wt%) */
/* 0 2 */
/* 1 3 */
R0=td[n1 ][n2 ][mm3][0]*1000.0;
R1=td[n1 ][n2+1][mm3][0]*1000.0;
R2=td[n1+1][n2 ][mm3][0]*1000.0;
R3=td[n1+1][n2+1][mm3][0]*1000.0;
H0=td[n1 ][n2 ][mm3][1]*1000.0*4.1837;
H1=td[n1 ][n2+1][mm3][1]*1000.0*4.1837;
H2=td[n1+1][n2 ][mm3][1]*1000.0*4.1837;
H3=td[n1+1][n2+1][mm3][1]*1000.0*4.1837;
W0=td[n1 ][n2 ][mm3][4];
W1=td[n1 ][n2+1][mm3][4];
W2=td[n1+1][n2 ][mm3][4];
W3=td[n1+1][n2+1][mm3][4];
G0=td[n1 ][n2 ][mm3][3]*1000.0;G0*=G0*R0;
G1=td[n1 ][n2+1][mm3][3]*1000.0;G1*=G1*R1;
G2=td[n1+1][n2 ][mm3][3]*1000.0;G2*=G2*R2;
G3=td[n1+1][n2+1][mm3][3]*1000.0;G3*=G3*R3;
/* Shear modulus calc by interpolation */
mgg=((G0*(1.0-n)+G1*n)*(1.0-e)+(G2*(1.0-n)+G3*n)*e);
/* Ro calc by interpolation */
mro=((R0*(1.0-n)+R1*n)*(1.0-e)+(R2*(1.0-n)+R3*n)*e);
/* Water wt% calc by interpolation */
mwa=((W0*(1.0-n)+W1*n)*(1.0-e)+(W2*(1.0-n)+W3*n)*e);
/* Add pore fluid */
/* Erosion surface: sy1 = depth of marker below the local surface */
e1=(x-gx[m10])/(gx[m10+1]-gx[m10]);
sy1=y-(e1*ep[m10+1]+(1.0-e1)*ep[m10]);
if(marks0[mm2]>0 && sy1>0 && sy1<zmpor && mtk<tkpor)
{
dmwa=marks0[mm2]*(tkpor-mtk)/(tkpor-273.15)*(zmpor-sy1)/zmpor;
mwa+=dmwa;
wro=1050.0;
mro=mro/(1.0+dmwa*1e-2*(mro/wro-1.0));
}
/* Cp calc by interpolation (dH/dT, clamped to [1e2,5e4]) */
mcp=((H2-H0)*(1.0-n)+(H3-H1)*n)/tkstp;
if(mcp<1e+2) mcp=1e+2;
else if(mcp>5e+4) mcp=5e+4;
/* Effective adiabatic betta=1/V*dV/dT=ro/T*[-dH/dP+V] calc by interpolation */
mbb=(2.0/(R1+R0)-(H1-H0)/pbstp/1e+5)*(1.0-e)+(2.0/(R3+R2)-(H3-H2)/pbstp/1e+5)*e;
mbb*=mro/mtk;
if(mbb<-1e-2) mbb=-1e-2;
else if(mbb>1e-2) mbb=1e-2;
/* Effective compressibility term alpha=1/ro*d(ro)/dP calc by interpolation */
maa=(2.0/(R1+R0)*(R1-R0)*(1.0-e)+2.0/(R3+R2)*(R3-R2)*e)/pbstp/1e+5;
if(maa<0) maa=0;
/* Activation enthalpy recalc using enthalpy changes */
/* Current Enthalpy */
mhh1=((H0*(1.0-n)+H1*n)*(1.0-e)+(H2*(1.0-n)+H3*n)*e);
/* Pmin Enthalpy */
mhh0=(td[n1][0 ][mm3][1]*(1.0-e) + td[n1+1][0 ][mm3][1]*e)*1000.0*4.1837;
/* Enthalpy Difference calc */
mdhh=(mhh1-mhh0);
/* Save TD variables */
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt+=krad;
}
/* Deep TD base type (evaluated for P > 0.75*pbmax so it can blend with the
   shallow result in the transition zone below) */
if(1==0 || mpb>0.75*pbmax || ynpb==1)
{
switch (mm2)
{
/* MORB DATABASE */
/* UPPER, LOWER Crust */
case 5: case 6: case 17: case 18: case 37: case 38:
/* Sediments */
case 2: case 3: case 4:
/* Molten Sediments */
case 22: case 23: case 24:
/* Molten crust */
case 25: case 26:
/* Basalt */
case 16: case 7:
/* Molten Basalt */
case 36: case 27:
/* Gabbro */
case 8:
/* Molten Gabbro */
case 28:
mm3=10;
break;
/**/
/* PIROLITE DATABASE */
/* Dry peridotite */
case 9: case 12: case 14: case 10:
/* Wet peridotite */
case 13: case 11:
/* Molten peridotite */
case 34:
mm3=9;
break;
// Added missing rock types
// NOTE(review): these cases fall through into default: and therefore hit the
// fatal exit(0) below — if types 15/19/20/21/29/30 were meant to get a table
// index, an mm3 assignment + break is missing here.
case 15: case 19: case 20: case 21: case 29: case 30:
/* Unknown type */
default:
{fprintf(fp_log,"Deep TD: Unknown rock type for TD database %d, for marker %ld with T= %f, P=%f \n",mm2,mm1,mtk,mpb); fflush(fp_log); exit(0);}
}
/* ABCD-4Cell Number (deep table uses its own range/steps: tkmin1,pbmin1,...) */
e=(mtk-tkmin1)/tkstp1;
if(e<0) e=0;
if(e>(double)(tknum1-1)) e=(double)(tknum1-1);
n=(mpb-pbmin1)/pbstp1;
if(n<0) n=0;
if(n>(double)(pbnum1-1)) n=(double)(pbnum1-1);
n1=(int)(e);
if(n1>tknum1-2) n1=tknum1-2;
n2=(int)(n);
if(n2>pbnum1-2) n2=pbnum1-2;
/* e,n Calc */
e=(e-(double)(n1));
n=(n-(double)(n2));
/* Ro H values */
/* 0 2 */
/* 1 3 */
R0=td[n1 ][n2 ][mm3][0]*1000.0;
R1=td[n1 ][n2+1][mm3][0]*1000.0;
R2=td[n1+1][n2 ][mm3][0]*1000.0;
R3=td[n1+1][n2+1][mm3][0]*1000.0;
H0=td[n1 ][n2 ][mm3][1]*1000.0*4.1837;
H1=td[n1 ][n2+1][mm3][1]*1000.0*4.1837;
H2=td[n1+1][n2 ][mm3][1]*1000.0*4.1837;
H3=td[n1+1][n2+1][mm3][1]*1000.0*4.1837;
W0=td[n1 ][n2 ][mm3][4];
W1=td[n1 ][n2+1][mm3][4];
W2=td[n1+1][n2 ][mm3][4];
W3=td[n1+1][n2+1][mm3][4];
G0=td[n1 ][n2 ][mm3][3]*1000.0;G0*=G0*R0;
G1=td[n1 ][n2+1][mm3][3]*1000.0;G1*=G1*R1;
G2=td[n1+1][n2 ][mm3][3]*1000.0;G2*=G2*R2;
G3=td[n1+1][n2+1][mm3][3]*1000.0;G3*=G3*R3;
/* Shear modulus calc by interpolation */
mgg=((G0*(1.0-n)+G1*n)*(1.0-e)+(G2*(1.0-n)+G3*n)*e);
/* Ro calc by interpolation */
mro=((R0*(1.0-n)+R1*n)*(1.0-e)+(R2*(1.0-n)+R3*n)*e);
/* Water wt% calc by interpolation */
mwa=0;
/* Water in crystals */
if(mm2!=9 && mm2!=10 && mm2!=14 && mpb<235000.0)
{
dmwa=0.1;
mwa+=dmwa;
wro=1050.0;
mro=100.0/((100.0-dmwa)/mro+dmwa/wro);
}
/* Cp calc by interpolation */
mcp=((H2-H0)*(1.0-n)+(H3-H1)*n)/tkstp1;
if(mcp<1e+2) mcp=1e+2;
else if(mcp>5e+4) mcp=5e+4;
/* Effective adiabatic betta=1/V*dV/dT=ro/T*[-dH/dP+V] calc by interpolation */
mbb=(2.0/(R1+R0)-(H1-H0)/pbstp1/1e+5)*(1.0-e)+(2.0/(R3+R2)-(H3-H2)/pbstp1/1e+5)*e;
mbb*=mro/mtk;
if(mbb<-1e-2) mbb=-1e-2;
else if(mbb>1e-2) mbb=1e-2;
/* Effective compressibility term alpha=1/ro*d(ro)/dP calc by interpolation */
maa=(2.0/(R1+R0)*(R1-R0)*(1.0-e)+2.0/(R3+R2)*(R3-R2)*e)/pbstp1/1e+5;
if(maa<0) maa=0;
/* Activation enthalpy recalc using enthalpy changes */
/* Current Enthalpy */
mhh1=((H0*(1.0-n)+H1*n)*(1.0-e)+(H2*(1.0-n)+H3*n)*e);
/* Pmin Enthalpy */
mhh0=(td[n1][0 ][mm3][1]*(1.0-e) + td[n1+1][0 ][mm3][1]*e)*1000.0*4.1837;
/* Enthalpy Difference calc */
mdhh=(mhh1-mhh0);
/* Thermal conductivity */
mkt=mkt1+krad;
/* Computing transitional parameters: above pbmax use deep values outright,
   otherwise blend linearly with the shallow values already stored in *M... */
if(1==0 || mpb>pbmax || ynpb==1)
// Manny has 1==1
{
/* Save TD variables */
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt=mkt;
}
else
{
/* xold: weight of the shallow result, 1 at 0.75*pbmax -> 0 at pbmax */
xold=(pbmax-mpb)/(0.25*pbmax);
/* Save TD variables */
// Second column comes from shallow database assignment above, but I never reach into this deep one !
mgg=mgg*(1.0-xold)+ *Mgg *xold;
mro=mro*(1.0-xold)+ *Mro *xold;
mwa=mwa*(1.0-xold)+ *Mwa *xold;
mcp=mcp*(1.0-xold)+ *Mcp *xold;
mbb=mbb*(1.0-xold)+ *Mbb *xold;
maa=maa*(1.0-xold)+ *Maa *xold;
mdhh=mdhh*(1.0-xold)+ *Mdhh *xold;
mkt=mkt*(1.0-xold)+ *Mkt *xold;
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt=mkt;
}
}
}
/* End OMP Thermodynamic database use for ro, Cp */
// *** Interpolation routines using the following nodal locations ***
/* Staggered Nodes num */
/* [0] [3] [6] */
/* T0,xy0 Vy0 T3,xy3 Vy3 */
/* */
/* Vx0 P4,xx4,yy4 Vx3 P7,xx7,yy7 */
/* */
/* [1] [4] [7] */
/* T,xy1 Vy1 T4,xy4 Vy4 */
/* */
/* Vx1 P5,xx5,yy5 Vx4 P8,xx8,yy8 */
/* */
/* [2] [5] [8] */
/* */
/* */
/* Weights for horizontal and vertical nodes calculation for marker interpolation.
   Fills the global coefficient table cn[][] (via fdweight): column [1] gets the
   horizontal weights, column [0] the vertical weights. */
void nodewt(long int m1min, long int m1max, long int m2min, long int m2max, double x, double y, int ynx, int yny)
/* m1min,m1max, m2min,m2max - node X,Y number limits */
/* x,y - current pont coordinates */
/* ynx, yny - Type of shifts: No(0), Back(-1), Forw(1) */
{
/* Eyy vertical position */
long int m3;
int nx,ny;
/* Weigths in horizontal directions */
/* Load distances to xn[]: back-shifted (midpoints with previous node) */
if(ynx<0)
{
for (m3=m1min;m3<=m1max;m3++)
{
xn[m3-m1min]=(gx[m3]+gx[m3-1])/2.0;
}
}
/* Unshifted: basic node positions */
if(ynx==0)
{
for (m3=m1min;m3<=m1max;m3++)
{
xn[m3-m1min]=gx[m3];
}
}
/* Forward-shifted: midpoints with next node */
if(ynx>0)
{
for (m3=m1min;m3<=m1max;m3++)
{
xn[m3-m1min]=(gx[m3]+gx[m3+1])/2.0;
}
}
/* Calc maximal position in xn[] */
nx=(int)(m1max-m1min);
/* Calc coefficients for horizontal direction */
fdweight(nx,0,x);
/**/
/* Reload horizontal coefficients to cn[] column 1 (fdweight writes column 0,
   which is about to be overwritten by the vertical pass) */
for (m3=0;m3<=nx;m3++)
{
cn[m3][1]=cn[m3][0];
}
/* Weigths in vertical directions */
/* Load distances to xn[] */
if(yny<0)
{
for (m3=m2min;m3<=m2max;m3++)
{
xn[m3-m2min]=(gy[m3]+gy[m3-1])/2.0;
}
}
if(yny==0)
{
for (m3=m2min;m3<=m2max;m3++)
{
xn[m3-m2min]=gy[m3];
}
}
if(yny>0)
{
for (m3=m2min;m3<=m2max;m3++)
{
xn[m3-m2min]=(gy[m3]+gy[m3+1])/2.0;
}
}
/* Calc maximal position in xn[] */
ny=(int)(m2max-m2min);
/* Calc coefficients for horizontal direction */
fdweight(ny,0,y);
}
/* End Weights for horizontal and vertical nodes calculation for marker interpolation */
/* Calculation of EE,VX,VY,ESP, and PR by Interpolation.
   Bilinear interpolation from the staggered grid to point (x,y); (m10,m20) is
   the upper-left basic node of the hosting cell.  Outputs: VX,VY velocity,
   PR pressure, ESP spin, EE second strain-rate invariant. */
void allinteriomp(double x, double y, long int m10, long int m20, double *VX, double *VY, double *PR, double *ESP, double *EE)
/* x,y - XY location of point for Vx,Vy calc */
{
/* Counters */
long int m1,m2,m3;
/* en-NormalisedDistance */
// Keep EXX and EXY local here, so only calculates EE
// NOTE(review): 'ival' is declared but unused in this routine (only referenced
// in the commented-out lines further down).
double e,n,ival,xrat,EXX,EXY;
/**/
/**/
/* Check X,Y: clamp the sample point into the grid */
if(x<0) x=0;
else if(x>xsize) x=xsize;
if(y<0) y=0;
else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Check weighting for interpolation: 2/3 nearest-cell + 1/3 far-node blend,
   degraded to pure nearest-cell (xrat=1) near the grid margins */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/**/
// Store for more usage throughout subroutine
m1=m10;
m2=m20;
/**/
/**/
/**/
/* EXY, ESP interpolation ------------------------ */
// Clear buffer
*ESP=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<1) m10=1;
if(m10>xnumx-3) m10=xnumx-3;
if(m20<1) m20=1;
if(m20>ynumy-3) m20=ynumy-3;
/**/
/* Calc normalized distances */
// Note that the nodal distance is now fixed, while before could change it with intermod.
// If want that again see old scripts in dynwif/CleanOldRun..
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Vx interpolation ------------------------ */
m3=m10*ynumy+m20;
EXY=(1.0-e)*(1.0-n)*exy[m3]+(1.0-e)*n*exy[m3+1]+e*(1.0-n)*exy[m3+ynumy]+e*n*exy[m3+ynumy+1];
*ESP=(1.0-e)*(1.0-n)*esp[m3]+(1.0-e)*n*esp[m3+1]+e*(1.0-n)*esp[m3+ynumy]+e*n*esp[m3+ynumy+1];
/* End EXY, ESP interpolation ------------------------ */
/**/
/**/
/**/
/* Exx, P interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
*EE=0;
*PR=0;
*VX=0;
*VY=0;
/* Horizontal,Vertical limits for interpolation calc (snap to nearest
   pressure/cell-center node) */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1;
if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1;
if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances (cell-center grid) */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* Interpolation ------------------------ */
m3=m10*ynumy+m20;
EXX=(1.0-e)*(1.0-n)*exx[m3]+(1.0-e)*n*exx[m3+1]+e*(1.0-n)*exx[m3+ynumy]+e*n*exx[m3+ynumy+1];
// QUESTION TARAS why Interpolate pressure here, do already in interp or d? Now I do port it back, so rm if no need ...
// I guess you could also formulate this more in general, as sometimes I have the feeling some variables are interpolated needlessly. Could you please go over these routines and removed what is not really need to speed the code up?
*PR=(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
// Include small weight (xrat) from farther away nodes for velocities
*VX=( (1.0-e)*(1.0-n)*(vx[m3-1]+vx[m3-ynumy-1])+(1.0-e)*n*(vx[m3]+vx[m3-ynumy]) +e*(1.0-n)*(vx[m3+ynumy-1]+vx[m3-1])+e*n*(vx[m3+ynumy]+vx[m3]) ) * 0.5*(1.0-xrat);
*VY=( (1.0-e)*(1.0-n)*(vy[m3-ynumy]+vy[m3-ynumy-1])+(1.0-e)*n*(vy[m3-ynumy+1]+vy[m3-ynumy]) +e*(1.0-n)*(vy[m3]+vy[m3-1])+e*n*(vy[m3+1]+vy[m3]) ) * 0.5*(1.0-xrat);
//eps[11]+=ival*(vx[m3-1]+vx[m3-ynumy-1])*0.5*(1.0-xrat); QUESTION TARAS Why use nodes above and left above here ??
//eps[12]+=ival*(vy[m3-ynumy]+vy[m3-ynumy-1])*0.5*(1.0-xrat);
// Calculate second invariant from the interpolated strain-rate components
*EE=pow(EXX*EXX+EXY*EXY,0.5);
/* End SIGxx*EPSxx,SIGyy*EPSyy interpolation ------------------------ */
/* Vx interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc (Vx nodes are staggered
   half a cell vertically) */
if(y<(gy[m20]+gy[m20+1])/2.0) m20-=1;
if(m10<0) m10=0;
if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0;
if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-(gy[m20]+gy[m20+1])/2.0)/((gy[m20+2]-gy[m20])/2.0);
/* Vx interpolation ------------------------ */
m3=m10*ynumy+m20;
*VX+=((1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1])*xrat;
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc (Vy nodes are staggered
   half a cell horizontally) */
if(x<(gx[m10]+gx[m10+1])/2.0) m10-=1;
if(m10<0) m10=0;
if(m10>xnumx-3) m10=xnumx-3;
if(m20<0) m20=0;
if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10]+gx[m10+1])/2.0)/((gx[m10+2]-gx[m10])/2.0);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Vy interpolation ------------------------ */
m3=m10*ynumy+m20;
*VY+=((1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1])*xrat;
/* End Vy
interpolation ------------------------ */
/**/
/**/
/**/
}
/* OMP Interpolate Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy from surrounding nodes to marker at x,y */
/* OMP Calculation of T,T0 for current location by Interpolation.
   Bilinear interpolation of the two temperature fields tk[] and tk2[] from the
   basic nodes of cell (m10,m20) to point (x,y). */
void allintertomp(double x, double y, long int m10, long int m20, double *TK, double *TK2)
/* x,y - XY location of point for Vx,Vy calc */
/* m10, m20 - Upper left node */
// TK - marker temperature
{
/* Counters */
long int m3;
/* en-NormalizedDistance */
/* NOTE(review): 'ival' is unused in this routine */
double e,n,ival;
/* Check X,Y: clamp into the grid */
if(x<0) x=0;
else if(x>xsize) x=xsize;
if(y<0) y=0;
else if(y>ysize) y=ysize;
/* T interpolation ------------------------ */
/* Buffer clear */
*TK=*TK2=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<0) m10=0;
if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0;
if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* T interpolation ------------------------ */
m3=m10*ynumy+m20;
*TK=(1.0-e)*(1.0-n)*tk[m3]+(1.0-e)*n*tk[m3+1]+e*(1.0-n)*tk[m3+ynumy]+e*n*tk[m3+ynumy+1];
*TK2=(1.0-e)*(1.0-n)*tk2[m3]+(1.0-e)*n*tk2[m3+1]+e*(1.0-n)*tk2[m3+ynumy]+e*n*tk2[m3+ynumy+1];
/* End T interpolation ------------------------ */
}
/* OMP Calculation of T,T0 for current location by Interpolation */
/* OMP Calculation of P by Interpolation.
   Returns pressure bilinearly interpolated from the 4 surrounding
   cell-center (pressure) nodes; (m10,m20) is the hosting upper-left node. */
double allinterpomp(double x, double y, long int m10, long int m20)
/* x,y - XY location of point for Vx,Vy calc */
/* m10, m20 - Upper left node */
{
/* Counters */
long int m3;
/* en-Normalized distance */
double ival,e,n;
/* Check X,Y: clamp into the grid */
if(x<0) x=0;
else if(x>xsize) x=xsize;
if(y<0) y=0;
else if(y>ysize) y=ysize;
/* Buffer clear */
ival=0;
/* Horizontal,Vertical limits for interpolation calc (snap to nearest
   pressure node, then clamp away from the grid border) */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1;
if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1;
if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances on the cell-center grid */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* P interpolation ------------------------ */
m3=m10*ynumy+m20;
ival=(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
/* Return pressure */
return ival;
/* fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar();
if(timestep){fprintf(fp_log,"P1 %e %e %ld %ld %e %e %e",x,y,m10,m20,e,n,ival);getchar();} */
}
/* OMP End calculation of P by Interpolation */
/* Calculation of SIGij by Interpolation.
   Interpolates temperature, old/new deviatoric stresses and strain rates
   (xy at basic nodes; xx/pressure terms at cell centers) and velocities
   (grid vx/vy plus marker-derived mvx/mvy) to point (x,y). */
void allinterdomp(double x, double y,long int m10, long int m20, double *TK,double *EXY,double *EXYE,double *SXY,double *SXYE,double *EXX,double *SXX,double *PR,double *SXXE,double *SPPE,double *EXXE,double *VX, double *MVX, double *VY, double *MVY)
/* x,y - XY location of point for Vx,Vy calc */
{
/* Counters */
long int m1,m2,m3;
/* en-NormalisedDistance */
double ival,e,n,xrat;
/**/
/**/
/* Check X,Y: clamp into the grid */
if(x<0) x=0;
else if(x>xsize) x=xsize;
if(y<0) y=0;
else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Store Up Left Node X,Y Num for later re-usage */
m1=m10;
m2=m20;
/**/
/**/
/* Check weighting for interpolation: 2/3 near + 1/3 far blend, pure near
   (xrat=1) at the grid margins */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/* T interpolation ------------------------ */
/* Buffer clear */
*TK=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<0) m10=0;
if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0;
if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* EPSxy Interpolate after interpolation weights */
m3=m10*ynumy+m20;
*TK=(1.0-e)*(1.0-n)*tk[m3]+(1.0-e)*n*tk[m3+1]+e*(1.0-n)*tk[m3+ynumy]+e*n*tk[m3+ynumy+1];
/**/
/* End SIGij old interpolation ------------------------ */
/* End T interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
*EXY=*EXYE=*SXY=*SXYE=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<1) m10=1;
if(m10>xnumx-3) m10=xnumx-3;
if(m20<1) m20=1;
if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/**/
/* EPSxy Interpolate after interpolation weights (current and elastic/old
   xy strain rate and stress at basic nodes) */
m3=m10*ynumy+m20;
*EXY=(1.0-e)*(1.0-n)*exy[m3]+(1.0-e)*n*exy[m3+1]+e*(1.0-n)*exy[m3+ynumy]+e*n*exy[m3+ynumy+1];
*EXYE=(1.0-e)*(1.0-n)*exye[m3]+(1.0-e)*n*exye[m3+1]+e*(1.0-n)*exye[m3+ynumy]+e*n*exye[m3+ynumy+1];
*SXY=(1.0-e)*(1.0-n)*sxy[m3]+(1.0-e)*n*sxy[m3+1]+e*(1.0-n)*sxy[m3+ynumy]+e*n*sxy[m3+ynumy+1];
*SXYE=(1.0-e)*(1.0-n)*sxye[m3]+(1.0-e)*n*sxye[m3+1]+e*(1.0-n)*sxye[m3+ynumy]+e*n*sxye[m3+ynumy+1];
/* End SIGxy interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxx,SIGyy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
*EXX=*SXX=*PR=*SXXE=*SPPE=*EXXE=0;
*VX=*MVX=0;
*VY=*MVY=0;
/* Horizontal,Vertical limits for interpolation calc (snap to nearest
   cell-center node) */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1;
if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1;
if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances on the cell-center grid */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* P interpolation ------------------------ */
m3=m10*ynumy+m20;
*EXX =(1.0-e)*(1.0-n)*exx[m3]+(1.0-e)*n*exx[m3+1]+e*(1.0-n)*exx[m3+ynumy]+e*n*exx[m3+ynumy+1];
*SXX =(1.0-e)*(1.0-n)*sxx[m3]+(1.0-e)*n*sxx[m3+1]+e*(1.0-n)*sxx[m3+ynumy]+e*n*sxx[m3+ynumy+1];
*PR =(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
*SPPE=(1.0-e)*(1.0-n)*sppe[m3]+(1.0-e)*n*sppe[m3+1]+e*(1.0-n)*sppe[m3+ynumy]+e*n*sppe[m3+ynumy+1];
*SXXE=(1.0-e)*(1.0-n)*sxxe[m3]+(1.0-e)*n*sxxe[m3+1]+e*(1.0-n)*sxxe[m3+ynumy]+e*n*sxxe[m3+ynumy+1];
*EXXE=(1.0-e)*(1.0-n)*exxe[m3]+(1.0-e)*n*exxe[m3+1]+e*(1.0-n)*exxe[m3+ynumy]+e*n*exxe[m3+ynumy+1];
/* Far-node velocity contribution with weight (1-xrat) */
*VX=( (1.0-e)*(1.0-n)*(vx[m3-1]+vx[m3-ynumy-1])+(1.0-e)*n*(vx[m3]+vx[m3-ynumy]) +e*(1.0-n)*(vx[m3+ynumy-1]+vx[m3-1])+e*n*(vx[m3+ynumy]+vx[m3]) )*0.5 *(1.0-xrat);
*VY=( (1.0-e)*(1.0-n)*(vy[m3-ynumy]+vy[m3-ynumy-1])+(1.0-e)*n*(vy[m3-ynumy+1]+vy[m3-ynumy]) +e*(1.0-n)*(vy[m3]+vy[m3-1])+e*n*(vy[m3+1]+vy[m3]) )*0.5 *(1.0-xrat);
*MVX=( (1.0-e)*(1.0-n)*(mvx[m3-1]+mvx[m3-ynumy-1])+(1.0-e)*n*(mvx[m3]+mvx[m3-ynumy]) +e*(1.0-n)*(mvx[m3+ynumy-1]+mvx[m3-1])+e*n*(mvx[m3+ynumy]+mvx[m3]) )*0.5 *(1.0-xrat);
*MVY=( (1.0-e)*(1.0-n)*(mvy[m3-ynumy]+mvy[m3-ynumy-1])+(1.0-e)*n*(mvy[m3-ynumy+1]+mvy[m3-ynumy]) +e*(1.0-n)*(mvy[m3]+mvy[m3-1])+e*n*(mvy[m3+1]+mvy[m3]) )*0.5 *(1.0-xrat);
/* End SIGxx,SIGyy interpolation ------------------------ */
/**/
/**/
/**/
/* Vx interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc (Vx nodes staggered half
   a cell vertically) */
if(y<(gy[m20]+gy[m20+1])/2.0) m20-=1;
if(m10<0) m10=0;
if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0;
if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-(gy[m20]+gy[m20+1])/2.0)/((gy[m20+2]-gy[m20])/2.0);
/* Vx interpolation ------------------------ */
m3=m10*ynumy+m20;
//*VX=(1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1];
//*MVX=(1.0-e)*(1.0-n)*mvx[m3]+(1.0-e)*n*mvx[m3+1]+e*(1.0-n)*mvx[m3+ynumy]+e*n*mvx[m3+ynumy+1];
// Include small weight (xrat) from farther away nodes for velocities
*VX+=((1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1]) *xrat;
*MVX+=((1.0-e)*(1.0-n)*mvx[m3]+(1.0-e)*n*mvx[m3+1]+e*(1.0-n)*mvx[m3+ynumy]+e*n*mvx[m3+ynumy+1]) *xrat;
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc (Vy nodes staggered half
   a cell horizontally) */
if(x<(gx[m10]+gx[m10+1])/2.0) m10-=1;
if(m10<0) m10=0;
if(m10>xnumx-3) m10=xnumx-3;
if(m20<0) m20=0;
if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10]+gx[m10+1])/2.0)/((gx[m10+2]-gx[m10])/2.0);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Vy interpolation ------------------------ */
m3=m10*ynumy+m20;
//*VY=(1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1];
//*MVY=(1.0-e)*(1.0-n)*mvy[m3]+(1.0-e)*n*mvy[m3+1]+e*(1.0-n)*mvy[m3+ynumy]+e*n*mvy[m3+ynumy+1];
// Include small weight (xrat) from farther away nodes for velocities
*VY+=((1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1]) *xrat;
*MVY+=((1.0-e)*(1.0-n)*mvy[m3]+(1.0-e)*n*mvy[m3+1]+e*(1.0-n)*mvy[m3+ynumy]+e*n*mvy[m3+ynumy+1]) *xrat;
/* End Vy interpolation ------------------------ */
/* fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar(); */
}
/* Calculation of SIGij by Interpolation */
/* Calculation of Vx,Vy, EPSxx*SIGxx,EPSyy*SIGyy,EPSxy*SIGxy by Interpolation.
   Higher-order (Fornberg-weight) interpolation of dissipation terms and
   velocity to (x,y); results are returned in the global eps[] buffer
   (eps[11]=vx, eps[12]=vy, eps[13]=xy dissipation, eps[14]=xx dissipation). */
// Not adapted for parallelization
void allinters(double x, double y)
/* x,y - XY location of point for Vx,Vy calc */
{
/* Counters */
long int m1,m2,m3,m10,m20,m1min,m1max,m2min,m2max;
/* en-NormalisedDistance */
double ival;
/**/
/**/
/* Check X,Y: clamp into the grid */
if(x<0) x=0;
else if(x>xsize) x=xsize;
if(y<0) y=0;
else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Up Left Node X,Y Num */
wn[0]=m10=m1serch(x);
wn[1]=m20=m2serch(y);
/**/
/**/
/**/
/* SIGxy*EPSxy interpolation ------------------------ */
/* Buffer clear */
eps[13]=0;
/* Horizontal,Vertical limits for interpolation calc (stencil widened by
   'intermod' nodes on each side) */
m1min=m10;
if(m1min<1) m1min=1;
if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod;
if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod;
if(m1min<1) m1min=1;
/**/
m2min=m20;
if(m2min<1) m2min=1;
if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod;
if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod;
if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,0);
/**/
/* SIGxy,EPSxy Interpolate after interpolation weights */
for
(m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[13]+=ival*sxy[m3]*sxy[m3]/(2.0*nu[m3]); } /* End SIGxy*EPSxy interpolation ------------------------ */ /**/ /**/ /**/ /* SIGxx*EPSxx, SIGyy*EPSyy interpolation ------------------------ */ /* Buffer clear */ eps[14]=0; /* Horizontal,Vertical limits for interpolation calc */ m1min=m10; if(x>(gx[m10]+gx[m10+1])/2.0) m1min+=1; if(m1min<1) m1min=1; if(m1min>xnumx-2) m1min=xnumx-2; m1max=m1min+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1; m1min=m1min-intermod; if(m1min<1) m1min=1; /**/ m2min=m20; if(y>(gy[m20]+gy[m20+1])/2.0) m2min+=1; if(m2min<1) m2min=1; if(m2min>ynumy-2) m2min=ynumy-2; m2max=m2min+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1; m2min=m2min-intermod; if(m2min<1) m2min=1; /**/ /* Interpolation weights calc after Fornberg (1996) */ nodewt(m1min,m1max,m2min,m2max,x,y,-1,-1); /**/ /* SIGxx,EPSxx,SIGyy,EPSyy,P Interpolate after interpolation weights */ for (m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[14]+=ival*sxx[m3]*sxx[m3]/(2.0*nd[m3]); } /* End SIGxx*EPSxx,SIGyy*EPSyy interpolation ------------------------ */ /**/ /**/ /**/ /* Vx interpolation ------------------------ */ /* Buffer clear */ eps[11]=0; /* Horizontal,Vertical limits for interpolation calc */ m1min=m10-intermod; if(m1min<0) m1min=0; m1max=m10+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1; /**/ m2min=m20; if(y<(gy[m20]+gy[m20+1])/2.0) m2min-=1; if(m2min<0) m2min=0; if(m2min>ynumy-3) m2min=ynumy-3; m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2; m2min=m2min-intermod; if(m2min<0) m2min=0; /**/ /* Interpolation weights calc after Fornberg (1996) */ nodewt(m1min,m1max,m2min,m2max,x,y,0,+1); /**/ /* Vx Interpolate after interpolation weights */ for (m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; 
ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[11]+=ival*vx[m3]; } /* End Vx interpolation ------------------------ */ /**/ /**/ /**/ /* Vy interpolation ------------------------ */ /* Buffer clear */ eps[12]=0; /* Horizontal,Vertical limits for interpolation calc */ m1min=m10; if(x<(gx[m10]+gx[m10+1])/2.0) m1min-=1; if(m1min<0) m1min=0; if(m1min>xnumx-3) m1min=xnumx-3; m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2; m1min=m1min-intermod; if(m1min<0) m1min=0; /**/ m2min=m20-intermod; if(m2min<0) m2min=0; m2max=m20+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1; /**/ /* Interpolation weights calc after Fornberg (1996) */ nodewt(m1min,m1max,m2min,m2max,x,y,+1,0); /**/ /* Vy Interpolate after interpolation weights */ for (m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[12]+=ival*vy[m3]; } /* End Vy interpolation ------------------------ */ /* fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar(); */ } /* Calculation of Vx,Vy, EPSxx*SIGxx,EPSyy*SIGyy,EPSxy*SIGxy by Interpolation */ /* Calculation of Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy by Interpolation */ // Not adapted for parallelization void allinteri(double x, double y) /* x,y - XY location of point for Vx,Vy calc */ { /* Counters */ long int m1,m2,m3,m10,m20,m1min,m1max,m2min,m2max; /* en-NormalisedDistance */ double ival,xrat; /**/ /**/ /* Check X,Y */ if(x<0) x=0; else if(x>xsize) x=xsize; if(y<0) y=0; else if(y>ysize) y=ysize; /**/ /**/ /**/ /* Check weighting for interpolation */ xrat=2.0/3.0; if(x<(gx[0]+gx[1])/2.0) xrat=1.0; if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0; if(y<(gy[0]+gy[1])/2.0) xrat=1.0; if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0; /**/ /**/ /**/ /* Up Left Node X,Y Num */ wn[0]=m10=m1serch(x); wn[1]=m20=m2serch(y); /**/ /**/ /**/ /* EPSxy, SPINxy interpolation ------------------------ */ /* Buffer clear */ eps[4]=eps[30]=0; /* Horizontal,Vertical limits for interpolation calc */ m1min=m10; if(m1min<1) 
m1min=1; if(m1min>xnumx-3) m1min=xnumx-3; m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2; m1min=m1min-intermod; if(m1min<1) m1min=1; /**/ m2min=m20; if(m2min<1) m2min=1; if(m2min>ynumy-3) m2min=ynumy-3; m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2; m2min=m2min-intermod; if(m2min<1) m2min=1; /**/ /* Interpolation weights calc after Fornberg (1996) */ nodewt(m1min,m1max,m2min,m2max,x,y,0,0); /**/ /* SIGxy,EPSxy Interpolate after interpolation weights */ for (m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[4]+=ival*exy[m3]; eps[30]+=ival*esp[m3]; } /* End SIGxy*EPSxy interpolation ------------------------ */ /**/ /**/ /**/ /* EPSxx, EPSyy, P interpolation ------------------------ */ /* Buffer clear */ eps[6]=eps[10]=eps[11]=eps[12]=0; /* Horizontal,Vertical limits for interpolation calc */ m1min=m10; if(x>(gx[m10]+gx[m10+1])/2.0) m1min+=1; if(m1min<1) m1min=1; if(m1min>xnumx-2) m1min=xnumx-2; m1max=m1min+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1; m1min=m1min-intermod; if(m1min<1) m1min=1; /**/ m2min=m20; if(y>(gy[m20]+gy[m20+1])/2.0) m2min+=1; if(m2min<1) m2min=1; if(m2min>ynumy-2) m2min=ynumy-2; m2max=m2min+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1; m2min=m2min-intermod; if(m2min<1) m2min=1; /**/ /* Interpolation weights calc after Fornberg (1996) */ nodewt(m1min,m1max,m2min,m2max,x,y,-1,-1); /**/ /* SIGxx,EPSxx,SIGyy,EPSyy,P Interpolate after interpolation weights */ for (m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[6]+=ival*exx[m3]; eps[10]+=ival*pr[m3]; eps[11]+=ival*(vx[m3-1]+vx[m3-ynumy-1])*0.5*(1.0-xrat); eps[12]+=ival*(vy[m3-ynumy]+vy[m3-ynumy-1])*0.5*(1.0-xrat); } /* End SIGxx*EPSxx,SIGyy*EPSyy interpolation ------------------------ */ /* depthp(x,y);eps[10]=eps[50]; */ /**/ /**/ /**/ /* Vx interpolation ------------------------ */ /* 
Horizontal,Vertical limits for interpolation calc */ m1min=m10-intermod; if(m1min<0) m1min=0; m1max=m10+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1; /**/ m2min=m20; if(y<(gy[m20]+gy[m20+1])/2.0) m2min-=1; if(m2min<0) m2min=0; if(m2min>ynumy-3) m2min=ynumy-3; m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2; m2min=m2min-intermod; if(m2min<0) m2min=0; /**/ /* Interpolation weights calc after Fornberg (1996) */ nodewt(m1min,m1max,m2min,m2max,x,y,0,+1); /**/ /* Vx Interpolate after interpolation weights */ for (m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[11]+=ival*vx[m3]*xrat; } /* End Vx interpolation ------------------------ */ /**/ /**/ /**/ /* Vy interpolation ------------------------ */ /* Horizontal,Vertical limits for interpolation calc */ m1min=m10; if(x<(gx[m10]+gx[m10+1])/2.0) m1min-=1; if(m1min<0) m1min=0; if(m1min>xnumx-3) m1min=xnumx-3; m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2; m1min=m1min-intermod; if(m1min<0) m1min=0; /**/ m2min=m20-intermod; if(m2min<0) m2min=0; m2max=m20+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1; /**/ /* Interpolation weights calc after Fornberg (1996) */ nodewt(m1min,m1max,m2min,m2max,x,y,+1,0); /**/ /* Vy Interpolate after interpolation weights */ for (m1=m1min;m1<=m1max;m1++) for (m2=m2min;m2<=m2max;m2++) { /* Current node num, wt */ m3=m1*ynumy+m2; ival=cn[m1-m1min][1]*cn[m2-m2min][0]; eps[12]+=ival*vy[m3]*xrat; } /* End Vy interpolation ------------------------ */ /* fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar(); */ } /* Calculation of Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy by Interpolation */
trmv_x_dia_u_hi_conj.c
/*
 * DIA-format triangular matrix-vector multiply, y := alpha*op(A)*x + beta*y,
 * for a unit-diagonal ("u") upper ("hi") triangle with conjugation ("conj").
 * ONAME, ALPHA_Number, ALPHA_INT etc. are presumably macro-expanded per
 * datatype by the alphasparse build system (kernel.h) -- confirm against the
 * kernel naming convention.
 */
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * OpenMP worker. Each thread accumulates its off-diagonal contributions into
 * a private length-m buffer (tmp[threadId]) so the per-diagonal parallel loop
 * needs no atomics; the buffers are reduced into y afterwards.
 * Only compiled for complex types (#ifdef COMPLEX) -- the "conj" variant is
 * meaningless for real data and returns INVALID_VALUE instead.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y)
{
#ifdef COMPLEX
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    /* triangular mv is only defined for square matrices */
    if(m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
    const ALPHA_INT thread_num = alpha_get_thread_num();
    /* NOTE(review): malloc results are not checked before use (CERT MEM32-C);
       also allocated with plain malloc but released below with alpha_free --
       verify the two allocators are compatible. */
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
    for(int i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }
    const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];
        /* dis > 0 selects the strictly-upper stored diagonals; the unit main
           diagonal is applied in the reduction loop below */
        if(dis > 0)
        {
            const ALPHA_INT row_start = 0;
            const ALPHA_INT col_start = dis;
            const ALPHA_INT nnz = m - dis;
            const ALPHA_INT start = i * A->lval;   /* offset of diagonal i in values[] */
            for(ALPHA_INT j = 0; j < nnz; ++j)
            {
                ALPHA_Number v;
                /* alpha_mul_3c presumably computes v = alpha * conj(values[...]);
                   confirm against the macro definition */
                alpha_mul_3c(v, alpha, A->values[start + j]);
                /* NOTE(review): accumulates into the COLUMN index from the ROW
                   element of x, i.e. the transpose of the stored upper triangle;
                   presumably intentional for this "_x_" variant -- verify. */
                alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
            }
        }
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    /* Reduction: y[i] = beta*y[i] + alpha*x[i] (unit diagonal) + sum of the
       per-thread partial results. */
    for(ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
        alpha_madde(y[i], alpha, x[i]);
        for(ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}

/* Public entry point: dispatches to the OpenMP implementation.
   Returns INVALID_VALUE when built for a non-complex datatype. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y)
{
#ifdef COMPLEX
    return ONAME_omp(alpha, A, x, beta, y);
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
DRB020-privatemissing-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* tmp should be put as private to avoid race condition
   Data race pair: tmp@65 vs. tmp@66 */

#include <stdlib.h>

/*
 * DataRaceBench DRB020: kernel with a DELIBERATELY seeded data race.
 * Do NOT "fix" this file -- the shared 'tmp' is the defect that race
 * detectors are expected to report (see the pair annotation above).
 */
int main(int argc, char* argv[])
{
    int i;
    int tmp;       /* shared across all threads: the seeded defect */
    int len = 100; /* default array length */

    /* optional array length from the command line */
    if (argc > 1) len = atoi(argv[1]);

    int a[len];    /* C99 VLA sized by the (possibly user-supplied) length */

    for (i = 0; i < len; i++)
        a[i] = i;

    /* 'i' is the workshared loop index and therefore implicitly private,
       but 'tmp' stays shared, so concurrent iterations race between the
       write below and the subsequent read. */
#pragma omp parallel for schedule(dynamic)
    for (i = 0; i < len; i++)
    {
        tmp = a[i] + i;
        a[i] = tmp;
    }
    return 0;
}
tools.h
#ifndef TOOLS_H
#define TOOLS_H

// NOTE(review): 'using namespace std;' in a header leaks into every includer;
// kept because existing translation units may rely on it.
using namespace std;

// Glyphs used by print_mat: 0 = empty ('*'), 1 = lost cub ('@'), 2 = wolf ('L').
const char sym[3] = {'*', '@', 'L'};

// Fill an n-by-n matrix with the constant c.
void init_mat_const(int *mat, int n, int c){
    #pragma omp parallel for
    for(int i=0; i<n; ++i){
        for(int j=0; j<n; ++j){
            // widen before multiplying so the index cannot overflow int
            long index = (long)i*n + (long)j;
            mat[index] = c;
        }
    }
}

// Print the n-by-n matrix using the symbol table above.
void print_mat(int *mat, int n){
    for(int i=0; i<n; ++i){
        for(int j=0; j<n; ++j){
            // fixed: index was computed in int arithmetic (i*n + j), which
            // overflows for large n; now consistent with init_mat_const
            long index = (long)i*n + (long)j;
            printf("%c ", sym[mat[index]]);
        }
        printf("\n");
    }
}

// Place the lost cub at a uniformly random cell.
void ubicar_lobito(int *mat, int n){
    int i = rand() % n;
    int j = rand() % n;
    mat[i*n + j] = 1;
}

// Deterministic pack placement (stub): intended to split the domain among the
// k wolves via a 2D partition (factor k into two 'similar' values).
void ubicar_manada_det(int *mat, int n, vector<pair<int,int>> &man){
    // not implemented yet
}

// Non-deterministic pack placement: every wolf starts at one common random
// cell that does not already hold the cub; the cell is marked 2 on the grid.
void ubicar_manada_nondet(int *mat, int n, vector<pair<int,int>> &man){
    if(man.size() > 0){
        int i = rand() % n;
        int j = rand() % n;
        while(mat[i*n + j] == 1){   // re-roll until we avoid the cub's cell
            i = rand() % n;
            j = rand() % n;
        }
        mat[i*n + j] = 2;
        for(size_t k=0; k<man.size(); ++k){
            man[k] = {i,j};
        }
    }
}

// Deterministic search (stub).
// Fixed: the body was empty, so control flowed off the end of a
// value-returning function -- undefined behavior in C++ when called.
// Return the same sentinel the non-deterministic variant uses.
std::pair<int, int> busqueda_det(int *mat, int n, vector<pair<int,int>> &m){
    (void)mat; (void)n; (void)m;
    return {0, 0};
}

// Non-deterministic search: each wolf takes one random step (-1/0/+1 in each
// axis, wrapping toroidally) per iteration, printing the grid every step.
// NOTE(review): 'perdido' is never cleared, so this loop does not terminate;
// the success condition (a wolf reaching the cub) still needs to be added.
std::pair<int, int> busqueda_nodet(int *mat, int n, vector<pair<int,int>> &m){
    int perdido = 1;
    while(perdido){
        for(size_t k=0; k<m.size(); ++k){
            mat[m[k].first*n + m[k].second] = 0;
            // fixed: (x + step) % n is negative when x==0 and step==-1 (C++
            // '%' truncates toward zero), which indexed out of bounds;
            // add n before taking the modulus to keep the wrap in [0, n).
            int i = (m[k].first  + n + (rand() % 3) - 1) % n;
            int j = (m[k].second + n + (rand() % 3) - 1) % n;
            m[k] = {i,j};
            mat[m[k].first*n + m[k].second] = 2;
        }
        print_mat(mat, n);
        getchar();   // pause between steps (debug visualisation)
    }
    return {0,0};
}
#endif
drt_dft_solver.h
#ifndef _DRT_DFT_SOLVER_ #define _DRT_DFT_SOLVER_ #include <complex> #include "toefl/toefl.h" #include "blueprint.h" #include "equations.h" namespace toefl { /*! @brief Solver for dirichlet type x-boundary conditions of the toefl equations. * @ingroup solvers */ template< size_t n> class DRT_DFT_Solver { public: typedef Matrix<double, TL_DRT_DFT> Matrix_Type; /*! @brief Construct a solver for dirichlet type boundary conditions * * The constructor allocates storage for the solver * and initializes all fourier coefficients as well as * all low level solvers needed. * @param blueprint Contains all the necessary parameters. * @throw Message If your parameters are inconsistent. */ DRT_DFT_Solver( const Blueprint& blueprint); /*! @brief Prepare Solver for execution * * This function takes the fields and computes the missing * one according to the target parameter passed. After that * it performs three initializing steps (one onestep-, * one twostep-method and the threestep-method used in the step function) * in order to initialize the karniadakis scheme. The actual time is * thus T_0 + 3*dt after initialisation. * @param v Container with three non void matrices * @param t which Matrix is missing? */ void init( std::array< Matrix<double,TL_DRT_DFT>, n>& v, enum target t); /** * @brief Perform first initializing step * */ void first_step(); /** * @brief Perform second initializing step * * After that the step function can be used */ void second_step(); /*! @brief Perform a step by the 3 step Karniadakis scheme * * @attention At least one call of first_step() and second_step() is necessary * */ void step(){ step_<TL_ORDER3>();} /*! @brief Get the result You get the solution matrix of the current timestep. @param t The field you want @return A Read only reference to the field @attention The reference is only valid until the next call to the step() function! */ const Matrix<double, TL_DRT_DFT>& getField( enum target t) const; /*! 
@brief Get the result Use this function when you want to call step() without destroying the solution. @param m In exchange for the solution matrix you have to provide storage for further calculations. The field is swapped in. @param t The field you want. @attention The fields you get are not the ones of the current timestep. You get the fields that are not needed any more. This means the densities are 4 timesteps "old" whereas the potential is the one of the last timestep. */ void getField( Matrix<double, TL_DRT_DFT>& m, enum target t); /*! @brief Get the parameters of the solver. @return The parameters in use. @note You cannot change parameters once constructed. */ const Blueprint& blueprint() const { return blue;} private: typedef std::complex<double> complex; //methods void init_coefficients( const Boundary& bound, const Physical& phys); void compute_cphi();//multiply cphi //void first_steps(); template< enum stepper S> void step_(); //members const size_t rows, cols; const size_t crows, ccols; const Blueprint blue; /////////////////fields////////////////////////////////// //GhostMatrix<double, TL_DRT_DFT> ghostdens, ghostphi; std::array< Matrix<double, TL_DRT_DFT>, n> dens, phi, nonlinear; /////////////////Complex (void) Matrices for fourier transforms/////////// std::array< Matrix< complex>, n> cdens, cphi; ///////////////////Solvers//////////////////////// Arakawa arakawa; Karniadakis<n, complex, TL_DRT_DFT> karniadakis; DRT_DFT drt_dft; /////////////////////Coefficients////////////////////// Matrix< std::array< double, n> > phi_coeff; std::array< Matrix< double>, n-1> gamma_coeff; }; template< size_t n> DRT_DFT_Solver<n>::DRT_DFT_Solver( const Blueprint& bp): rows( bp.algorithmic().ny ), cols( bp.algorithmic().nx ), crows( cols), ccols( rows/2+1), blue( bp), //fields dens( MatrixArray<double, TL_DRT_DFT,n>::construct( rows, cols)), phi( dens), nonlinear( dens), cdens( MatrixArray<complex, TL_NONE, n>::construct( crows, ccols)), cphi(cdens), //Solvers 
arakawa( bp.algorithmic().h), karniadakis(rows, cols, crows, ccols, bp.algorithmic().dt), drt_dft( rows, cols, fftw_convert( bp.boundary().bc_x), FFTW_MEASURE), //Coefficients phi_coeff( crows, ccols), gamma_coeff( MatrixArray< double, TL_NONE, n-1>::construct( crows, ccols)) { bp.consistencyCheck(); Physical phys = bp.physical(); if( bp.isEnabled( TL_GLOBAL)) { std::cerr << "WARNING: GLOBAL solver not implemented yet! \n\ Switch to local solver...\n"; } init_coefficients( bp.boundary(), phys); } //aware of BC template< size_t n> void DRT_DFT_Solver<n>::init_coefficients( const Boundary& bound, const Physical& phys) { Matrix< QuadMat< complex, n> > coeff( crows, ccols); double laplace; const complex dymin( 0, 2.*M_PI/bound.ly); const double kxmin2 = M_PI*M_PI/(double)(bound.lx*bound.lx), kymin2 = 4.*M_PI*M_PI/(double)(bound.ly*bound.ly); double add; if( bound.bc_x == TL_DST00 || bound.bc_x == TL_DST10) add = 1.0; else add = 0.5; Equations e( phys, blue.isEnabled( TL_MHW)); Poisson p( phys); // drt_dft is transposing so i is the x index for( unsigned i = 0; i<crows; i++) for( unsigned j = 0; j<ccols; j++) { laplace = - kxmin2*(double)((i+add)*(i+add)) - kymin2*(double)(j*j); if( n == 2) gamma_coeff[0](i,j) = p.gamma1_i( laplace); else if( n == 3) { gamma_coeff[0](i,j) = p.gamma1_i( laplace); gamma_coeff[1](i,j) = p.gamma1_z( laplace); } e( coeff( i,j), laplace, (double)j*dymin); p( phi_coeff(i,j), laplace); } double norm = fftw_normalisation( bound.bc_x, cols)*(double)rows; karniadakis.init_coeff( coeff, norm); } //unaware of BC except FFT template< size_t n> void DRT_DFT_Solver<n>::init( std::array< Matrix<double, TL_DRT_DFT>,n>& v, enum target t) { //fourier transform input into cdens for( unsigned k=0; k<n; k++) { #ifdef TL_DEBUG if( v[k].isVoid()) throw Message("You gave me a void Matrix!!", _ping_); #endif drt_dft.r2c_T( v[k], cdens[k]); } //don't forget to normalize coefficients!! 
double norm = fftw_normalisation( blue.boundary().bc_x, cols)*(double)rows; for( unsigned k=0; k<n; k++) for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols;j++) cdens[k](i,j) /= norm; switch( t) //which field must be computed? { case( TL_ELECTRONS): //bring cdens and cphi in the right order swap_fields( cphi[0], cdens[n-1]); for( unsigned k=n-1; k>0; k--) swap_fields( cdens[k], cdens[k-1]); //now solve for cdens[0] for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols; j++) { cdens[0](i,j) = cphi[0](i,j)/phi_coeff(i,j)[0]; for( unsigned k=0; k<n && k!=0; k++) cdens[0](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[0]; } break; case( TL_IONS): //bring cdens and cphi in the right order swap_fields( cphi[0], cdens[n-1]); for( unsigned k=n-1; k>1; k--) swap_fields( cdens[k], cdens[k-1]); //solve for cdens[1] for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols; j++) { cdens[1](i,j) = cphi[0](i,j) /phi_coeff(i,j)[1]; for( unsigned k=0; k<n && k!=1; k++) cdens[1](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[1]; } break; case( TL_IMPURITIES): //bring cdens and cphi in the right order swap_fields( cphi[0], cdens[n-1]); for( unsigned k=n-1; k>2; k--) //i.e. 
never for n = 3 swap_fields( cdens[k], cdens[k-1]); //solve for cdens[2] for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols; j++) { cdens[2](i,j) = cphi[0](i,j) /phi_coeff(i,j)[2]; for( unsigned k=0; k<n && k!=2; k++) cdens[2](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[2]; } break; case( TL_POTENTIAL): //solve for cphi for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols/2+1; j++) { cphi[0](i,j) = 0; for( unsigned k=0; k<n && k!=2; k++) cphi[0](i,j) += cdens[k](i,j)*phi_coeff(i,j)[k]; } break; case( TL_ALL): throw Message( "TL_ALL not treated yet!", _ping_); } //compute the rest cphi[k] for( unsigned k=0; k<n-1; k++) for( size_t i = 0; i < crows; i++) for( size_t j = 0; j < ccols; j++) cphi[k+1](i,j) = gamma_coeff[k](i,j)*cphi[0](i,j); //backtransform to x-space for( unsigned k=0; k<n; k++) { drt_dft.c_T2r( cdens[k], dens[k]); drt_dft.c_T2r( cphi[k], phi[k]); } //now the density and the potential is given in x-space //first_steps(); } template< size_t n> void DRT_DFT_Solver<n>::getField( Matrix<double, TL_DRT_DFT>& m, enum target t) { #ifdef TL_DEBUG if(m.isVoid()) throw Message( "You may not swap in a void Matrix!\n", _ping_); #endif switch( t) { case( TL_ELECTRONS): swap_fields( m, nonlinear[0]); break; case( TL_IONS): swap_fields( m, nonlinear[1]); break; case( TL_IMPURITIES): swap_fields( m, nonlinear[2]); break; case( TL_POTENTIAL): swap_fields( m, cphi[0]); break; case( TL_ALL): throw Message( "TL_ALL not allowed here", _ping_); } } template< size_t n> const Matrix<double, TL_DRT_DFT>& DRT_DFT_Solver<n>::getField( enum target t) const { Matrix<double, TL_DRT_DFT> const * m = 0; switch( t) { case( TL_ELECTRONS): m = &dens[0]; break; case( TL_IONS): m = &dens[1]; break; case( TL_IMPURITIES): m = &dens[2]; break; case( TL_POTENTIAL): m = &phi[0]; break; case( TL_ALL): throw Message( "TL_ALL not allowed here", _ping_); } return *m; } template< size_t n> void DRT_DFT_Solver<n>::first_step() { karniadakis.template invert_coeff<TL_EULER>( 
); step_<TL_EULER>(); } template< size_t n> void DRT_DFT_Solver<n>::second_step() { karniadakis.template invert_coeff<TL_ORDER2>(); step_<TL_ORDER2>(); karniadakis.template invert_coeff<TL_ORDER3>(); } template< size_t n> void DRT_DFT_Solver<n>::compute_cphi() { if( n==2) { #pragma omp parallel for for( size_t i = 0; i < crows; i++) for( size_t j = 0; j < ccols; j++) cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j) + phi_coeff(i,j)[1]*cdens[1](i,j); #pragma omp parallel for for( size_t i = 0; i < crows; i++) for( size_t j = 0; j < ccols; j++) cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j); } else if( n==3) { #pragma omp parallel for for( size_t i = 0; i < crows; i++) for( size_t j = 0; j < ccols; j++) cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j) + phi_coeff(i,j)[1]*cdens[1](i,j) + phi_coeff(i,j)[2]*cdens[2](i,j); #pragma omp parallel for for( size_t i = 0; i < crows; i++) for( size_t j = 0; j < ccols; j++) { cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j); cphi[2](i,j) = gamma_coeff[1](i,j)*cphi[0](i,j); } } } //unaware of BC except FFT template< size_t n> template< enum stepper S> void DRT_DFT_Solver<n>::step_() { //1. Compute nonlinearity #pragma omp parallel for for( unsigned k=0; k<n; k++) { GhostMatrix<double, TL_DRT_DFT> ghostphi{ rows, cols, TL_PERIODIC, blue.boundary().bc_x}; GhostMatrix<double, TL_DRT_DFT> ghostdens{ rows, cols, TL_PERIODIC, blue.boundary().bc_x}; swap_fields( dens[k], ghostdens); //now dens[j] is void swap_fields( phi[k], ghostphi); //now phi[j] is void ghostdens.initGhostCells( ); ghostphi.initGhostCells( ); arakawa( ghostdens, ghostphi, nonlinear[k]); swap_fields( dens[k], ghostdens); //now ghostdens is void swap_fields( phi[k], ghostphi); //now ghostphi is void } //2. perform karniadakis step karniadakis.template step_i<S>( dens, nonlinear); //3. solve linear equation //3.1. transform v_hut #pragma omp parallel for for( unsigned k=0; k<n; k++) drt_dft.r2c_T( dens[k], cdens[k]); //3.2. 
perform karniadaksi step and multiply coefficients for phi karniadakis.step_ii( cdens); compute_cphi(); //3.3. backtransform #pragma omp parallel for for( unsigned k=0; k<n; k++) { drt_dft.c_T2r( cdens[k], dens[k]); drt_dft.c_T2r( cphi[k], phi[k]); } } }//namespace toefl #endif //_DRT_DFT_SOLVER_
schedbench.c
/****************************************************************************
 *  OpenMP MicroBenchmark Suite - Version 3.1
 *
 *  produced by
 *      Mark Bull, Fiona Reid and Nix Mc Donnell
 *  at
 *      Edinburgh Parallel Computing Centre
 *
 *  email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk
 *
 *  This version copyright (c) The University of Edinburgh, 2015.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 ****************************************************************************/

/* Measures the overhead of OpenMP loop-scheduling kinds (static, dynamic,
   guided) across a range of chunk sizes, relative to a sequential baseline.
   benchmark(), reference(), delay(), nthreads, innerreps and delaylength are
   presumably provided by common.h / schedbench.h -- confirm there. */

#include "common.h"
#include "schedbench.h"

/* cksz: chunk size currently under test; itersperthr: loop iterations
   assigned per thread for each measured workshared loop. */
int cksz, itersperthr = 128;
char testName[32];

/* Driver: runs the reference timing, then each schedule kind with chunk
   sizes doubling from 1 upward. */
int schedbench_main(int argc, char **argv) {
  ompbench_init(argc, argv);

  /* GENERATE REFERENCE TIME */
  reference("reference time", &refer);

  /* TEST STATIC */
  benchmark("STATIC", &teststatic);

  /* TEST STATIC,n -- chunk sizes 1, 2, 4, ... up to itersperthr */
  cksz = 1;
  while (cksz <= itersperthr) {
    sprintf(testName, "STATIC %d", cksz);
    benchmark(testName, &teststaticn);
    cksz *= 2;
  }

  /* TEST DYNAMIC,n */
  cksz = 1;
  while (cksz <= itersperthr) {
    sprintf(testName, "DYNAMIC %d", cksz);
    benchmark(testName, &testdynamicn);
    cksz *= 2;
  }

  /* TEST GUIDED,n -- guided chunks shrink from a large initial size, so only
     chunk sizes up to itersperthr / nthreads are meaningful */
  cksz = 1;
  while (cksz <= itersperthr / nthreads) {
    sprintf(testName, "GUIDED %d", cksz);
    benchmark(testName, &testguidedn);
    cksz *= 2;
  }

  finalise();
  return EXIT_SUCCESS;
}

/* Sequential baseline: the same total delay work with no OpenMP constructs,
   used to isolate the scheduling overhead in the tests below. */
static void refer() {
  int i, j;
  for (j = 0; j < innerreps; j++) {
    for (i = 0; i < itersperthr; i++) {
      delay(delaylength);
    }
  }
}

/* schedule(static) with the default (block) chunking. */
void teststatic() {
  int i, j;
#pragma omp parallel private(j)
  {
    for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(static)
      for (i = 0; i < itersperthr * nthreads; i++) {
        delay(delaylength);
      }
    }
  }
}

/* schedule(static, cksz): static with an explicit chunk size. */
void teststaticn() {
  int i, j;
#pragma omp parallel private(j)
  {
    for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(static,cksz)
      for (i = 0; i < itersperthr * nthreads; i++) {
        delay(delaylength);
      }
    }
  }
}

/* schedule(dynamic, cksz): chunks handed out on demand at run time. */
void testdynamicn() {
  int i, j;
#pragma omp parallel private(j)
  {
    for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(dynamic,cksz)
      for (i = 0; i < itersperthr * nthreads; i++) {
        delay(delaylength);
      }
    }
  }
}

/* schedule(guided, cksz): decreasing chunk sizes with minimum cksz. */
void testguidedn() {
  int i, j;
#pragma omp parallel private(j)
  {
    for (j = 0; j < innerreps; j++) {
#pragma omp for schedule(guided,cksz)
      for (i = 0; i < itersperthr * nthreads; i++) {
        delay(delaylength);
      }
    }
  }
}
facedetectcnn.h
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install, copy or
use the software.

                    License Agreement For libfacedetection
                         (3-clause BSD License)

Copyright (c) 2018-2019, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

  * Neither the names of the copyright holders nor the names of the
    contributors may be used to endorse or promote products derived from this
    software without specific prior written permission.

This software is provided by the copyright holders and contributors "as is"
and any express or implied warranties, including, but not limited to, the
implied warranties of merchantability and fitness for a particular purpose
are disclaimed. In no event shall copyright holders or contributors be liable
for any direct, indirect, incidental, special, exemplary, or consequential
damages (including, but not limited to, procurement of substitute goods or
services; loss of use, data, or profits; or business interruption) however
caused and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
*/

#pragma once

//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU

// Run the face-detection CNN on a BGR image.
//   result_buffer:  caller-provided memory for the detection results;
//                   !!its size must be 0x20000 Bytes!!
//   rgb_image_data: input image; it must be BGR (three channels) instead of
//                   RGB. width/height are in pixels; step is the row stride
//                   of the image buffer in bytes.
// Returns a pointer into result_buffer describing the detected faces.
int * facedetect_cnn(unsigned char * result_buffer,
                     unsigned char * rgb_image_data,
                     int width, int height, int step);

//DO NOT EDIT the following code if you don't really understand it.
#if defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif

#if defined(_ENABLE_NEON)
#include "arm_neon.h"
#define _ENABLE_INT8_CONV
#endif

// Alignment (in bits) used by myAlloc for all blob buffers; each pixel's
// channel data is padded up to a multiple of _MALLOC_ALIGN/8 bytes.
#if defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif

#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
// FIX: the message previously mentioned SSE2, which is not one of the two
// mutually exclusive options guarded here.
#error Cannot enable both AVX2 and NEON at the same time.
#endif

#if defined(_OPENMP)
#include <omp.h>
#endif

#include <string.h>
#include <vector>
#include <iostream>
using namespace std;

// Aligned allocation / release; defined in the implementation file.
void* myAlloc(size_t size);
void myFree_(void* ptr);

// Free *ptr with myFree_ and null the pointer.
// FIX: the original macro ended with a stray ';', which made
// `if (cond) myFree(&p); else ...` fail to compile. The expansion is now a
// single expression, so the macro behaves like a normal statement.
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0)

#ifndef MIN
#  define MIN(a,b)  ((a) > (b) ? (b) : (a))
#endif

#ifndef MAX
#  define MAX(a,b)  ((a) < (b) ? (b) : (a))
#endif

// One detected face: confidence score and bounding box (pixel coordinates).
typedef struct FaceRect_
{
    float score;
    int x;
    int y;
    int w;
    int h;
} FaceRect;

// A width x height x channels tensor holding both a float copy and an
// (optional) int8-quantized copy of the data. Channel data of each pixel is
// padded to _MALLOC_ALIGN/8 bytes so SIMD loads are aligned.
class CDataBlob
{
public:
    float * data_float;           // float data, may be NULL before create()
    signed char * data_int8;      // quantized data, valid iff int8_data_valid
    int width;
    int height;
    int channels;
    int floatChannelStepInByte;   // padded per-pixel stride of data_float
    int int8ChannelStepInByte;    // padded per-pixel stride of data_int8
    float int8float_scale;        // int8 element * scale = original value
    bool int8_data_valid;
public:
    CDataBlob()
    {
        data_float = 0;
        data_int8 = 0;
        width = 0;
        height = 0;
        channels = 0;
        floatChannelStepInByte = 0;
        int8ChannelStepInByte = 0;
        int8float_scale = 1.0f;
        int8_data_valid = false;
    }
    CDataBlob(int w, int h, int c)
    {
        // create() calls setNULL(), which frees these pointers, so they must
        // be zeroed before the call.
        data_float = 0;
        data_int8 = 0;
        create(w, h, c);
    }
    ~CDataBlob()
    {
        setNULL();
    }

    // Release both buffers and reset the blob to its empty state.
    void setNULL()
    {
        if (data_float) myFree(&data_float);
        if (data_int8) myFree(&data_int8);
        width = height = channels = floatChannelStepInByte = int8ChannelStepInByte = 0;
        int8float_scale = 1.0f;
        int8_data_valid = false;
    }

    // Allocate the float and int8 buffers for a w x h x c blob and zero the
    // padding bytes of every pixel. Returns false on allocation failure.
    bool create(int w, int h, int c)
    {
        setNULL();

        width = w;
        height = h;
        channels = c;

        //alloc space for float array
        int remBytes = (sizeof(float)* channels) % (_MALLOC_ALIGN / 8);
        if (remBytes == 0)
            floatChannelStepInByte = channels * sizeof(float);
        else
            floatChannelStepInByte = (channels * sizeof(float)) + (_MALLOC_ALIGN / 8) - remBytes;
        data_float = (float*)myAlloc(width * height * floatChannelStepInByte);

        //alloc space for int8 array
        remBytes = (sizeof(char)* channels) % (_MALLOC_ALIGN / 8);
        if (remBytes == 0)
            int8ChannelStepInByte = channels * sizeof(char);
        else
            int8ChannelStepInByte = (channels * sizeof(char)) + (_MALLOC_ALIGN / 8) - remBytes;
        data_int8 = (signed char*)myAlloc(width * height * int8ChannelStepInByte);

        if (data_float == NULL)
        {
            cerr << "Cannot alloc memeory for float data blob: "
                << width << "*"
                << height << "*"
                << channels << endl;
            // FIX: release the other buffer (if any) and leave the blob in a
            // consistent empty state instead of half-constructed.
            setNULL();
            return false;
        }
        if (data_int8 == NULL)
        {
            cerr << "Cannot alloc memeory for uint8 data blob: "
                << width << "*"
                << height << "*"
                << channels << endl;
            // FIX: same cleanup as above.
            setNULL();
            return false;
        }

        //memset(data_float, 0, width * height * floatChannelStepInByte);
        //memset(data_int8, 0, width * height * int8ChannelStepInByte);

        //the following code is faster than memset
        //but not only the padding bytes are set to zero.
        //BE CAREFUL!!!
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
        for (int r = 0; r < this->height; r++)
        {
            for (int c = 0; c < this->width; c++)
            {
                // Zero only the padding tail of each pixel (channels..stride).
                int pixel_end = this->floatChannelStepInByte / sizeof(float);
                float * pF = (float*)(this->data_float + (r * this->width + c) * this->floatChannelStepInByte/sizeof(float));
                for (int ch = this->channels; ch < pixel_end; ch++)
                    pF[ch] = 0;

                pixel_end = this->int8ChannelStepInByte / sizeof(char);
                char * pI = (char*)(this->data_int8 + (r * this->width + c) * this->int8ChannelStepInByte/sizeof(char));
                for (int ch = this->channels; ch < pixel_end; ch++)
                    pI[ch] = 0;
            }
        }

        return true;
    }

    // Copy int8 data laid out channel-major (Caffe CHW order) into this
    // blob's pixel-major (HWC) layout. Dimensions must already match.
    bool setInt8DataFromCaffeFormat(signed char * pData, int dataWidth, int dataHeight, int dataChannels)
    {
        if (pData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (dataWidth != this->width ||
            dataHeight != this->height ||
            dataChannels != this->channels)
        {
            cerr << "The dim of the data can not match that of the Blob." << endl;
            return false;
        }
        //create(dataWidth, dataHeight, dataChannels);
        for(int row = 0; row < height; row++)
            for (int col = 0; col < width; col++)
            {
                signed char * p = (this->data_int8 + (width * row + col) * int8ChannelStepInByte /sizeof(char));
                for (int ch = 0; ch < channels; ch++)
                {
                    p[ch] = pData[ch * height * width + row * width + col];
                }
            }
        return true;
    }

    // Same as setInt8DataFromCaffeFormat, but for the float buffer.
    bool setFloatDataFromCaffeFormat(float * pData, int dataWidth, int dataHeight, int dataChannels)
    {
        if (pData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (dataWidth != this->width ||
            dataHeight != this->height ||
            dataChannels != this->channels)
        {
            cerr << "The dim of the data can not match that of the Blob." << endl;
            return false;
        }
        //create(dataWidth, dataHeight, dataChannels);
        for (int row = 0; row < height; row++)
            for (int col = 0; col < width; col++)
            {
                float * p = (this->data_float + (width * row + col) * floatChannelStepInByte / sizeof(float));
                for (int ch = 0; ch < channels; ch++)
                {
                    p[ch] = pData[ch * height * width + row * width + col];
                }
            }
        return true;
    }

    // (Re)create the blob from an interleaved image, subtracting the per-
    // channel mean from every pixel. imgWidthStep is the row stride in bytes.
    bool setDataFromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep, int * pChannelMean)
    {
        if (imgData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (pChannelMean == NULL)
        {
            cerr << "The mean values is null." << endl;
            return false;
        }

        create(imgWidth, imgHeight, imgChannels);

//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
        for (int r = 0; r < imgHeight; r++)
        {
            for (int c = 0; c < imgWidth; c++)
            {
                const unsigned char * pImgData = imgData + imgWidthStep * r + imgChannels * c;
                float * pBlobData = this->data_float + (this->width * r + c) * this->floatChannelStepInByte /sizeof(float);
                for (int ch = 0; ch < imgChannels; ch++)
                    pBlobData[ch] = (float)(pImgData[ch] - pChannelMean[ch]);
            }
        }
        return true;
    }

    // Fold a 3x3/stride-2/pad-1 convolution's input gathering into the blob:
    // build a (W+1)/2 x (H+1)/2 x 27 blob where each output pixel holds the
    // mean-subtracted 3x3x3 input patch, so the first conv layer can be run
    // as a 1x1/stride-1/pad-0 convolution.
    bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep, int * pChannelMean)
    {
        if (imgData == NULL)
        {
            cerr << "The input image data is null." << endl;
            return false;
        }
        if (pChannelMean == NULL)
        {
            cerr << "The mean values is null." << endl;
            return false;
        }
        if (imgChannels != 3)
        {
            cerr << "The input image must be a 3-channel RGB image." << endl;
            return false;
        }

        create((imgWidth+1)/2, (imgHeight+1)/2, 27);

        //since the pixel assignment cannot fill all the elements in the blob,
        //some elements in the blob should be initialized to 0
        memset(data_float, 0, width * height * floatChannelStepInByte);

#if defined(_OPENMP)
#pragma omp parallel for
#endif
        for (int r = 0; r < this->height; r++)
        {
            for (int c = 0; c < this->width; c++)
            {
                float * pData = this->data_float + (r * this->width + c) * this->floatChannelStepInByte / sizeof(float);
                for (int fy = -1; fy <= 1; fy++)
                {
                    int srcy = r * 2 + fy;
                    if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
                        continue;
                    for (int fx = -1; fx <= 1; fx++)
                    {
                        int srcx = c * 2 + fx;
                        if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
                            continue;
                        const unsigned char * pImgData = imgData + imgWidthStep * srcy + imgChannels * srcx;
                        int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
                        pData[output_channel_offset] = (float)(pImgData[0] - pChannelMean[0]);
                        pData[output_channel_offset+1] = (float)(pImgData[1] - pChannelMean[1]);
                        pData[output_channel_offset+2] = (float)(pImgData[2] - pChannelMean[2]);
                    }
                }
            }
        }
        return true;
    }

    // Bounds-checked element access; returns 0.f when out of range or the
    // float buffer is absent.
    float getElementFloat(int x, int y, int channel)
    {
        if (this->data_float)
        {
            if (x >= 0 && x < this->width &&
                y >= 0 && y < this->height &&
                channel >= 0 && channel < this->channels)
            {
                float * p = (float*)(this->data_float + (y*this->width + x)*this->floatChannelStepInByte / sizeof(float));
                return p[channel];
            }
        }
        return 0.f;
    }

    // Bounds-checked int8 access; returns 0 when out of range or the int8
    // buffer is absent/invalid.
    int getElementint8(int x, int y, int channel)
    {
        if (this->data_int8 && this->int8_data_valid)
        {
            if (x >= 0 && x < this->width &&
                y >= 0 && y < this->height &&
                channel >= 0 && channel < this->channels)
            {
                signed char * p = this->data_int8 + (y*this->width + x)*this->int8ChannelStepInByte/sizeof(char);
                return p[channel];
            }
        }
        return 0;
    }

    // Debug dump of the float data, one channel at a time.
    friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
    {
        output << "DataBlob Size (Width, Height, Channel) = ("
            << dataBlob.width
            << ", " << dataBlob.height
            << ", " << dataBlob.channels
            << ")" << endl;
        for (int ch = 0; ch < dataBlob.channels; ch++)
        {
            output << "Channel " << ch << ": " << endl;
            for (int row = 0; row < dataBlob.height; row++)
            {
                output << "(";
                for (int col = 0; col < dataBlob.width; col++)
                {
                    float * p = (dataBlob.data_float + (dataBlob.width * row + col) * dataBlob.floatChannelStepInByte/sizeof(float));
                    output << p[ch];
                    if (col != dataBlob.width - 1)
                        output << ", ";
                }
                output << ")" << endl;
            }
        }
        return output;
    }
};

// A convolution layer: its filter blobs plus padding/stride, and the scale
// used when the filters are stored quantized.
class Filters {
public:
    vector<CDataBlob *> filters;
    int pad;
    int stride;
    float scale; //element * scale = original value
};

bool convolution(CDataBlob *inputData, const Filters* filters, CDataBlob *outputData);
bool maxpooling2x2S2(const CDataBlob *inputData, CDataBlob *outputData);
bool concat4(const CDataBlob *inputData1, const CDataBlob *inputData2, const CDataBlob *inputData3, const CDataBlob *inputData4, CDataBlob *outputData);
bool scale(CDataBlob * dataBlob, float scale);
bool relu(const CDataBlob *inputOutputData);
bool priorbox(const CDataBlob * featureData, const CDataBlob * imageData, int num_sizes, float * pWinSizes, CDataBlob * outputData);
bool normalize(CDataBlob * inputOutputData, float * pScale);
bool blob2vector(const CDataBlob * inputData, CDataBlob * outputData, bool isFloat);
bool detection_output(const CDataBlob * priorbox, const CDataBlob * loc, const CDataBlob * conf, float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k, CDataBlob * outputData);

/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
bool softmax1vector2class(const CDataBlob *inputOutputData);

// FIX: prototype parameter renamed from the typo "with" to "width"
// (parameter names in declarations do not affect callers).
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int width, int height, int step);
GB_binop__pow_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this is a generated kernel instance for the POW operator on
// FP64; only comments have been added here, code is untouched.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_fp64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_fp64)
// C=scalar+B                       GB (_bind1st__pow_fp64)
// C=scalar+B'                      GB (_bind1st_tran__pow_fp64)
// C=A+scalar                       GB (_bind2nd__pow_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__pow_fp64)

// C type:     double
// A type:     double
// A pattern?  0
// B type:     double
// B pattern?  0

// BinaryOp:   cij = GB_pow (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow (x, y) ;

// true if the binop must be flipped
// (pow is not commutative, so a flipped application must swap its operands;
// see the flipxy handling in GB (_AemultB_02__pow_fp64) below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (every kernel below then compiles to `return (GrB_NO_VALUE)`)
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_FP64 || GxB_NO_POW_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE (same type here since A and B are both double)
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__one_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): generated instance of the ONE operator (cij = true) for bool;
// the input values Ax are never read (GB_GETA/GB_CAST are no-ops). Only
// comments added here, code is untouched.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__one_bool_bool
// op(A') function:  GB_unop_tran__one_bool_bool

// C type:   bool
// A type:   bool
// cast:     ;
// unaryop:  cij = true

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    ;

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = true ;

// casting
#define GB_CAST(z, aij) \
    ; ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    ; ; \
    /* Cx [pC] = op (cast (aij)) */ \
    ; ; \
    Cx [pC] = true ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ONE || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__one_bool_bool
(
    bool *Cx,               // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every entry is present, so set all of Cx to true
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                // the stray ';' statements are the expanded no-op
                // GB_GETA/GB_CAST macros for this value-independent op
                ; ;
                ; ;
                Cx [p] = true ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            ; ;
            ; ;
            Cx [p] = true ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__one_bool_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) { for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2035,2048)),ceild(8*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(8*t1+Nx+7,2048)),floord(16*t2+Nx+3,2048)),floord(8*t3+Nx-5,2048)),floord(16*t1-16*t2+Nz+Nx+5,2048));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),512*t4+510);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(2048*t4,4*t5+4); ubv=min(2048*t4+2047,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) 
+ (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); 
free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
pi-v8.c
/*
 * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
 * between 0 and 1.
 *
 * parallel version using OpenMP
 */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>                /* OpenMP */

/*
 * BUG FIX: the original used
 *     #if _DEBUG_  /  #define _DEBUG_ 1  /  #else  /  #define _DEBUG_ 0
 * which redefines the macro (ill-formed if -D_DEBUG_ expands empty) and is
 * the wrong idiom for "default to 0 when unset".  Use the standard guard.
 */
#ifndef _DEBUG_
#define _DEBUG_ 0
#endif

/*
 * Usage: pi <num_steps>
 * Prints the computed approximation (and, outside debug builds, the wall
 * clock time of the parallel region).
 */
int main(int argc, char *argv[])
{
  double x, sum = 0.0, pi = 0.0;
#if !_DEBUG_
  double start, end;
#endif
  int i;
  const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";

  if (argc < 2) {
    /* fputs, not fprintf(stderr, Usage): never pass a non-literal string
     * as a format string. */
    fputs(Usage, stderr);
    exit(1);
  }

  int num_steps = atoi(argv[1]);
  /* BUG FIX: atoi was unchecked; 0 or negative input made step infinite
   * or the loop empty.  Reject it explicitly. */
  if (num_steps <= 0) {
    fputs(Usage, stderr);
    exit(1);
  }
  double step = 1.0 / (double) num_steps;

#if !_DEBUG_
  start = omp_get_wtime();
#endif

  /* do computation -- using all available threads */
  /* x is a scratch variable per iteration (private); sum is combined
   * across threads with an OpenMP reduction. */
#pragma omp parallel private(i,x) reduction(+:sum)
  {
#if _DEBUG_
    int id = omp_get_thread_num();
#endif
#pragma omp for schedule(static,1)
    for (i = 0; i < num_steps; i++) {
      x = (i + 0.5) * step;      /* midpoint rule */
      sum += 4.0 / (1.0 + x * x);
#if _DEBUG_
      printf("thread id:%d it:%d\n", id, i);
#endif
    }
  }
  pi = step * sum;

#if !_DEBUG_
  end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", end - start);
#endif

  /* print results */
  printf("Value of pi = %12.10f\n", pi);

  return EXIT_SUCCESS;
}
laplace_par.h
#ifndef _LAPLACE_PAR_
#define _LAPLACE_PAR_

#include<omp.h>

// Zero both (SIZE+2) x (SIZE+2) grids, halo cells included.
template<int SIZE>
inline void initialize(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2])
{
#pragma omp parallel for
	for (int row = 0; row < SIZE + 2; ++row)
		for (int col = 0; col < SIZE + 2; ++col)
			a[row][col] = b[row][col] = 0.0;
}

// One Jacobi sweep over the interior.  The two buffers alternate roles by
// step parity: even n reads a and writes b, odd n reads b and writes a.
template<int SIZE>
inline void time_step(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2], int n)
{
	// Select source/destination once instead of duplicating the loop nest
	// in two branches (array parameters decay to row pointers).
	double (*src)[SIZE + 2] = (n % 2 == 0) ? a : b;
	double (*dst)[SIZE + 2] = (n % 2 == 0) ? b : a;

#pragma omp parallel for
	for (int row = 1; row < SIZE + 1; ++row)
		for (int col = 1; col < SIZE + 1; ++col)
			dst[row][col] = (src[row + 1][col] + src[row - 1][col]
			               + src[row][col - 1] + src[row][col + 1]) / 4.0;
}

#endif // !_LAPLACE_PAR_
blake2bp.c
/*
   BLAKE2 reference source code package - optimized C implementations

   Written in 2012 by Samuel Neves <sneves@dei.uc.pt>

   To the extent possible under law, the author(s) have dedicated all copyright
   and related and neighboring rights to this software to the public domain
   worldwide. This software is distributed without any warranty.

   You should have received a copy of the CC0 Public Domain Dedication along with
   this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

/* Number of parallel leaf lanes in the BLAKE2bp tree mode. */
#define PARALLELISM_DEGREE 4

/* Initialize one leaf state of the BLAKE2bp tree.  `offset` is the leaf
 * index (0..PARALLELISM_DEGREE-1).  Leaves sit at node_depth 0 and emit
 * BLAKE2B_OUTBYTES-byte inner hashes regardless of the final outlen. */
static int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32(&P->leaf_length, 0);
  store64(&P->node_offset, offset);
  P->node_depth = 0;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  /* Leaves produce the fixed inner length, not the caller's outlen. */
  S->outlen = P->inner_length;
  return 0;
}

/* Initialize the root state of the BLAKE2bp tree (node_depth 1); it hashes
 * the concatenated leaf outputs and produces the final digest. */
static int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32(&P->leaf_length, 0);
  store64(&P->node_offset, 0);
  P->node_depth = 1;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  S->outlen = P->digest_length;
  return 0;
}

/* Streaming init, unkeyed.  Returns 0 on success, -1 on bad outlen. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, ( uint8_t ) outlen, 0 ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, 0, i ) < 0 ) return -1;

  /* Last-node flags are required by the tree-mode finalization rules. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = ( uint8_t ) outlen;
  return 0;
}

/* Streaming init, keyed.  The key is padded to one full block and fed to
 * every leaf before any message data.  Returns 0 on success, -1 on bad
 * outlen/key parameters. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = ( uint8_t ) outlen;

  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb `inlen` bytes.  Message blocks are dealt round-robin to the four
 * leaves: leaf i consumes block i, i+4, i+8, ...  Partial trailing data is
 * kept in S->buf until the next update/final call.
 *
 * NOTE(review): the OpenMP path assumes the runtime grants exactly
 * PARALLELISM_DEGREE threads after omp_set_num_threads(); if fewer run,
 * some leaves would not be updated in this pass — TODO confirm against the
 * project's supported OpenMP configurations (upstream BLAKE2 later
 * restructured this loop for that reason). */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, size_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;

  /* Flush the carried partial buffer once it can be completed. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
#pragma omp parallel shared(S)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = ( size_t ) omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    /* Each lane strides over the input PARALLELISM_DEGREE blocks at a
     * time, taking only its own block from each group. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* Stash the remainder (< one full 4-block group) for later. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = ( uint32_t ) left + ( uint32_t ) inlen;
  return 0;
}

/* Finalize: drain the buffered tail into the leaves, finalize each leaf,
 * then hash the four leaf digests through the root.  Returns the root
 * finalization result; -1 if outlen does not match the init-time value. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];

  if(S->outlen != outlen) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}

/* One-shot BLAKE2bp: hashes `inlen` bytes of `in` (optionally keyed) into
 * `out`.  Returns 0 on success, -1 on invalid parameters.
 * NOTE(review): same exact-thread-count assumption as blake2bp_update. */
int blake2bp( uint8_t *out, const void *in, const void *key, size_t outlen, size_t inlen, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if ( NULL == key && keylen > 0) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
#pragma omp parallel shared(S,hash)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = ( size_t ) omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    /* Round-robin full blocks, then this lane's share of the tail. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 )
    return -1;

  FS->last_node = 1; // Mark as last node

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}
static.c
/* Compiler/regression testcase: a file-scope static variable used as the
 * iteration variable of an OpenMP worksharing loop with a lastprivate
 * clause.  The exact shape (static var + lastprivate(bork)) is the point
 * of the test — do not "clean it up". */
static int bork;

/* Work routine; defined in another translation unit. */
void bar(void);

/* Runs the loop in parallel; per lastprivate semantics, after the region
 * bork holds the value it would have after sequential execution of the
 * loop. */
void foobar (void)
{
#pragma omp parallel
  {
#pragma omp for lastprivate(bork)
    for (bork = 0; bork < 100; bork++)
      {
        bar();
      }
  }
}
Graph.h
/*
 * Graph.h
 *
 * Created on: 01.06.2014
 * Author: Christian Staudt (christian.staudt@kit.edu), Klara Reichard (klara.reichard@gmail.com), Marvin Ritter (marvin.ritter@gmail.com)
 */

#ifndef GRAPH_H_
#define GRAPH_H_

#include <algorithm>
#include <vector>
#include <stack>
#include <queue>
#include <utility>
#include <stdexcept>
#include <functional>
#include <unordered_set>

#include "../Globals.h"
#include "Coordinates.h"
#include "../viz/Point.h"
#include "../auxiliary/Random.h"
#include "../auxiliary/FunctionTraits.h"
#include "../auxiliary/Log.h"

namespace NetworKit {

/**
 * A weighted edge used for the graph constructor with
 * initializer list syntax.
 */
struct WeightedEdge {
	node u, v;           // endpoints
	edgeweight weight;   // edge weight

	WeightedEdge(node u, node v, edgeweight w) : u(u), v(v), weight(w) {
	}
};

// Orders weighted edges by weight alone; endpoints do not participate, so
// distinct edges of equal weight compare equivalent.
inline bool operator<(const WeightedEdge& e1, const WeightedEdge& e2) {
	return e1.weight < e2.weight;
}

/**
 * An unweighted edge.  With sorted == true the endpoints are normalized so
 * that u <= v (canonical form for undirected use); otherwise they are kept
 * in the given order.
 */
struct Edge {
	node u, v;

	Edge(node _u, node _v, bool sorted = false) {
		if (sorted) {
			u = std::min(_u, _v);
			v = std::max(_u, _v);
		} else {
			u = _u;
			v = _v;
		}
	}
};

// Equality is order-sensitive: (u,v) != (v,u) unless both were normalized
// via the sorted constructor flag.
inline bool operator==(const Edge& e1, const Edge& e2) {
	return e1.u == e2.u && e1.v == e2.v;
}
}

namespace std {
	template<>
	struct hash<NetworKit::Edge> {
		// NOTE(review): XOR of the endpoint hashes is symmetric, so (u,v)
		// and (v,u) — and any edge with u == v — collide more than
		// necessary; functionally fine, just a collision-rate observation.
		size_t operator()(const NetworKit::Edge& e) const {
			return hash_node(e.u) ^ hash_node(e.v);
		}

		hash<NetworKit::node> hash_node;
	};
}

namespace NetworKit {

/**
 * @ingroup graph
 * A graph (with optional weights) and parallel iterator methods.
*/ class Graph final { friend class ParallelPartitionCoarsening; friend class GraphBuilder; private: // graph attributes count id; //!< unique graph id, starts at 0 std::string name; //!< name of the graph, initially G#ID // scalars count n; //!< current number of nodes count m; //!< current number of edges count storedNumberOfSelfLoops; //!< current number of self loops, edges which have the same origin and target node z; //!< current upper bound of node ids, z will be the id of the next node edgeid omega; //!< current upper bound of edge ids, will be the id of the next edge count t; //!< current time step bool weighted; //!< true if the graph is weighted, false otherwise bool directed; //!< true if the graph is directed, false otherwise bool edgesIndexed; //!< true if edge ids have been assigned // per node data std::vector<bool> exists; //!< exists[v] is true if node v has not been removed from the graph Coordinates<float> coordinates; //!< coordinates of nodes (if present) std::vector<count> inDeg; //!< only used for directed graphs, number of edges incoming per node std::vector<count> outDeg; //!< degree of every node, zero if node was removed. For directed graphs only outgoing edges count std::vector< std::vector<node> > inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes u that have an edge (u, v) std::vector< std::vector<node> > outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in outEdges[u] and for undirected also u in outEdges[v] std::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges std::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges std::vector< std::vector<edgeid> > inEdgeIds; //!< only used for directed graphs, same schema as inEdges std::vector< std::vector<edgeid> > outEdgeIds; //!< same schema (and same order!) as outEdges /** * Returns the next unique graph id. 
*/ count getNextGraphId(); /** * Returns the index of node u in the array of incoming edges of node v. (for directed graphs inEdges is searched, while for indirected outEdges is searched, which gives the same result as indexInOutEdgeArray). */ index indexInInEdgeArray(node v, node u) const; /** * Returns the index of node v in the array of outgoing edges of node u. */ index indexInOutEdgeArray(node u, node v) const; /** * Returns the edge weight of the outgoing edge of index i in the outgoing edges of node u * @param u The node * @param i The index * @return The weight of the outgoing edge or defaultEdgeWeight if the graph is unweighted */ template<bool hasWeights> inline edgeweight getOutEdgeWeight(node u, index i) const; /** * Returns the edge weight of the incoming edge of index i in the incoming edges of node u * * @param u The node * @param i The index in the incoming edge array * @return The weight of the incoming edge */ template<bool hasWeights> inline edgeweight getInEdgeWeight(node u, index i) const; /** * Returns the edge id of the edge of index i in the outgoing edges of node u * * @param u The node * @param i The index in the outgoing edges * @return The edge id */ template<bool graphHasEdgeIds> inline edgeid getOutEdgeId(node u, index i) const; /** * Returns the edge id of the edge of index i in the incoming edges of node u * * @param u The node * @param i The index in the incoming edges of u * @return The edge id */ template<bool graphHasEdgeIds> inline edgeid getInEdgeId(node u, index i) const; /** * @brief Returns if the edge (u, v) shall be used in the iteration of all edgesIndexed * * @param u The source node of the edge * @param v The target node of the edge * @return If the node shall be used, i.e. 
if v is not none and in the undirected case if u >= v */ template<bool graphIsDirected> inline bool useEdgeInIteration(node u, node v) const; /** * @brief Implementation of the for loop for outgoing edges of u * * Note: If all (valid) outgoing edges shall be considered, graphIsDirected needs to be set to true * * @param u The node * @param handle The handle that shall be executed for each edge * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void forOutEdgesOfImpl(node u, L handle) const; /** * @brief Implementation of the for loop for incoming edges of u * * For undirected graphs, this is the same as forOutEdgesOfImpl but u and v are changed in the handle * * @param u The node * @param handle The handle that shall be executed for each edge * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void forInEdgesOfImpl(node u, L handle) const; /** * @brief Implementation of the for loop for all edges, @see forEdges * * @param handle The handle that shall be executed for all edges * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void forEdgeImpl(L handle) const; /** * @brief Parallel implementation of the for loop for all edges, @see parallelForEdges * * @param handle The handle that shall be executed for all edges * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline void parallelForEdgesImpl(L handle) const; /** * @brief Summation variant of the parallel for loop for all edges, @see parallelSumForEdges * * @param handle The handle that shall be executed for all edges * @return void */ template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L> inline double parallelSumForEdgesImpl(L handle) const; /* * In the following definition, Aux::FunctionTraits is used in order to only execute lambda functions * with the appropriate 
parameters. The decltype-return type is used for determining the return type of * the lambda (needed for summation) but also determines if the lambda accepts the correct number of parameters. * Otherwise the return type declaration fails and the function is excluded from overload resoluation. * Then there are multiple possible lambdas with three (third parameter id or weight) and two (second parameter * can be second node id or edge weight for neighbor iterators). This is checked using Aux::FunctionTraits and * std::enable_if. std::enable_if only defines the type member when the given bool is true, this bool comes from * std::is_same which compares two types. The function traits give either the parameter type or if it is out of bounds * they define type as void. */ /** * Triggers a static assert error when no other method is chosen. Because of the use of "..." as arguments, the priority * of this method is lower than the priority of the other methods. This method avoids ugly and unreadable template substitution * error messages from the other declarations. */ template<class F, void* = (void*)0> typename Aux::FunctionTraits<F>::result_type edgeLambda(F&f, ...) const { // the strange condition is used in order to delay the eveluation of the static assert to the moment when this function is actually used static_assert(! std::is_same<F, F>::value, "Your lambda does not support the required parameters or the parameters have the wrong type."); return std::declval<typename Aux::FunctionTraits<F>::result_type>(); // use the correct return type (this won't compile) } /** * Calls the given function f if its fourth argument is of the type edgeid and third of type edgeweight * Note that the decltype check is not enough as edgeweight can be casted to node and we want to assure that . 
*/ template < class F, typename std::enable_if < (Aux::FunctionTraits<F>::arity >= 3) && std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value && std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<3>::type>::value >::type * = (void*)0 > auto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew, id)) { return f(u, v, ew, id); } /** * Calls the given function f if its third argument is of the type edgeid, discards the edge weight * Note that the decltype check is not enough as edgeweight can be casted to node. */ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 2) && std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<2>::type>::value && std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value /* prevent f(v, weight, eid) */ >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, id)) { return f(u, v, id); } /** * Calls the given function f if its third argument is of type edgeweight, discards the edge id * Note that the decltype check is not enough as node can be casted to edgeweight. */ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 2) && std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew)) { return f(u, v, ew); } /** * Calls the given function f if it has only two arguments and the second argument is of type node, * discards edge weight and id * Note that the decltype check is not enough as edgeweight can be casted to node. 
*/ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 1) && std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v)) { return f(u, v); } /** * Calls the given function f if it has only two arguments and the second argument is of type edgeweight, * discards the first node and the edge id * Note that the decltype check is not enough as edgeweight can be casted to node. */ template<class F, typename std::enable_if< (Aux::FunctionTraits<F>::arity >= 1) && std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<1>::type>::value >::type* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, ew)) { return f(v, ew); } /** * Calls the given function f if it has only one argument, discards the first * node id, the edge weight and the edge id */ template<class F, void* = (void*)0> auto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(v)) { return f(v); } /** * Calls the given BFS handle with distance parameter */ template <class F> auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) { return f(u, dist); } /** * Calls the given BFS handle without distance parameter */ template <class F> auto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u)) { return f(u); } public: /** * Create a graph of @a n nodes. The graph has assignable edge weights if @a weighted is set to <code>true</code>. * If @a weighted is set to <code>false</code> each edge has edge weight 1.0 and any other weight assignment will * be ignored. * @param n Number of nodes. * @param weighted If set to <code>true</code>, the graph has edge weights. * @param directed If set to @c true, the graph will be directed. 
*/ Graph(count n = 0, bool weighted = false, bool directed = false); Graph(const Graph& G, bool weighted, bool directed); /** * Generate a weighted graph from a list of edges. (Useful for small * graphs in unit tests that you do not want to read from a file.) * * @param[in] edges list of weighted edges */ Graph(std::initializer_list<WeightedEdge> edges); /** * Create a graph as copy of @a other. * @param other The graph to copy. */ Graph(const Graph& other) = default; /** Default move constructor */ Graph(Graph&& other) = default; /** Default destructor */ ~Graph() = default; /** Default move assignment operator */ Graph& operator=(Graph&& other) = default; /** Default copy assignment operator */ Graph& operator=(const Graph& other) = default; /** EDGE IDS **/ /** * Initially assign integer edge identifiers. * * @param force Force re-indexing of edges even if they have already been indexed */ void indexEdges(bool force = false); /** * Checks if edges have been indexed * * @return bool if edges have been indexed */ bool hasEdgeIds() const { return edgesIndexed; } /** * Get the id of the given edge. */ edgeid edgeId(node u, node v) const; /** * Get an upper bound for the edge ids in the graph. * @return An upper bound for the edge ids. */ index upperEdgeIdBound() const { return omega; } /** GRAPH INFORMATION **/ /** * Get the ID of this graph. The ID is a unique unsigned integer given to * every graph on construction. */ count getId() const { return id; } /** * Return the type of the graph. * Graph: not weighted, undirected * WeightedGraph: weighted, undirected * DirectedGraph: not weighted, directed * WeightedDirectedGraph: weighted, directed */ std::string typ() const; /** * Try to save some memory by shrinking internal data structures of the graph. Only run this * once you finished editing the graph. Otherwise it will cause unnecessary reallocation of * memory. 
*/ void shrinkToFit(); /** * Compacts the adjacency arrays by re-using no longer neede slots from deleted edges. */ void compactEdges(); /** * Sorts the adjacency arrays by node id. While the running time is linear this * temporarily duplicates the memory. */ void sortEdges(); /** * Set name of graph to @a name. * @param name The name. */ void setName(std::string name) { this->name = name; } /* * Returns the name of the graph. * @return The name of the graph. */ std::string getName() const { return name; } /** * Returns a string representation of the graph. * @return A string representation. */ std::string toString() const; /* COPYING */ /* * Copies all nodes to a new graph * @return graph with the same nodes. */ Graph copyNodes() const; /* NODE MODIFIERS */ /** * Add a new node to the graph and return it. * @return The new node. */ node addNode(); /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Add a new node to the graph with coordinates @a x and @y and return it. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] node addNode(float x, float y); /** * Remove an isolated node @a v from the graph. * * @param u Node. * @note Although it would be convenient to remove all incident edges at the same time, * this causes complications for dynamic applications. Therefore, removeNode is an * atomic event. All incident edges need to be removed first and an exception is thrown * otherwise. */ void removeNode(node v); /** * Check if node @a v exists in the graph. * * @param v Node. * @return @c true if @a v exists, @c false otherwise. */ bool hasNode(node v) const { return (v < z) && this->exists[v]; } /** * Restores a previously deleted node @a v with its previous id in the graph. * * @param v Node. * */ void restoreNode(node v); // SET OPERATIONS /** * Appends another graph to this graph as a new subgraph. 
Performs node * id remapping. * @param G [description] */ void append(const Graph& G); /** * Modifies this graph to be the union of it and another graph. * Nodes with the same ids are identified with each other. * @param G [description] */ void merge(const Graph& G); // SUBGRAPHS Graph subgraphFromNodes(const std::unordered_set<node>& nodes) const; /** NODE PROPERTIES **/ /** * Returns the number of outgoing neighbors of @a v. * * @param v Node. * @return The number of outgoing neighbors. */ count degree(node v) const { return outDeg[v]; } /** * Get the number of incoming neighbors of @a v. * * @param v Node. * @return The number of incoming neighbors. * @note If the graph is not directed, the outgoing degree is returned. */ count degreeIn(node v) const { return directed ? inDeg[v] : outDeg[v]; } /** * Get the number of outgoing neighbors of @a v. * * @param v Node. * @return The number of outgoing neighbors. */ count degreeOut(node v) const { return outDeg[v]; } /** * Check whether @a v is isolated, i.e. degree is 0. * @param v Node. * @return @c true if the node is isolated (= degree is 0) */ bool isIsolated(node v) const { return outDeg[v] == 0 && (!directed || inDeg[v] == 0); } /** * Returns the weighted degree of @a v. * * @param v Node. * @return Weighted degree of @a v. * @note For directed graphs this is the sum of weights of all outgoing edges of @a v. */ edgeweight weightedDegree(node v) const; /** * Returns the volume of the @a v, which is the weighted degree with self-loops counted twice. * * @param v Node. * @return The volume of the @a v. */ edgeweight volume(node v) const; /** * Returns a random node of the graph. * @return A random node. */ node randomNode() const; /** * Returns a random neighbor of @a u and @c none if degree is zero. * * @param u Node. * @return A random neighbor of @a u. */ node randomNeighbor(node u) const; /* EDGE MODIFIERS */ /** * Insert an edge between the nodes @a u and @a v. 
If the graph is weighted you can optionally * set a weight for this edge. The default weight is 1.0. * Note: Multi-edges are not supported and will NOT be handled consistently by the graph data * structure. * @param u Endpoint of edge. * @param v Endpoint of edge. * @param weight Optional edge weight. */ void addEdge(node u, node v, edgeweight ew = defaultEdgeWeight); /** * Removes the undirected edge {@a u,@a v}. * @param u Endpoint of edge. * @param v Endpoint of edge. */ void removeEdge(node u, node v); /** * Removes all self-loops in the graph. */ void removeSelfLoops(); /** * Changes the edges {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2, @a t2} into {@a s2, @a t1}. * * If there are edge weights or edge ids, they are preserved. Note that no check is performed if the swap is actually possible, i.e. does not generate duplicate edges. * * @param s1 The first source * @param t1 The first target * @param s2 The second source * @param t2 The second target */ void swapEdge(NetworKit::node s1, NetworKit::node t1, NetworKit::node s2, NetworKit::node t2); /** * Checks if undirected edge {@a u,@a v} exists in the graph. * @param u Endpoint of edge. * @param v Endpoint of edge. * @return <code>true</code> if the edge exists, <code>false</code> otherwise. */ bool hasEdge(node u, node v) const; /** * Returns a random edge. By default a random node u is chosen and then some random neighbor v. So the probability of choosing (u, v) highly * depends on the degree of u. * Setting uniformDistribution to true, will give you a real uniform distributed edge, but will be very slow. So only use uniformDistribution * for single calls outside of any loops. */ std::pair<node, node> randomEdge(bool uniformDistribution = false) const; /** * Returns a vector with nr random edges. The edges are chosen uniform random. 
*/ std::vector< std::pair<node, node> > randomEdges(count nr) const; /* GLOBAL PROPERTIES */ /** * Returns <code>true</code> if this graph supports edge weights other than 1.0. * @return <code>true</code> if this graph supports edge weights other than 1.0. */ bool isWeighted() const { return weighted; } /** * Return @c true if this graph supports directed edges. * @return @c true if this graph supports directed edges. */ bool isDirected() const { return directed; } /** * Return <code>true</code> if graph contains no nodes. * @return <code>true</code> if graph contains no nodes. */ bool isEmpty() const { return n == 0; } /** * Return the number of nodes in the graph. * @return The number of nodes. */ count numberOfNodes() const { return n; } /** * Return the number of edges in the graph. * @return The number of edges. */ count numberOfEdges() const { return m; } /** * @return a pair (n, m) where n is the number of nodes and m is the number of edges */ std::pair<count, count> const size() { return {n, m}; }; /** * @return the density of the graph */ double density() const { count n = numberOfNodes(); count m = numberOfEdges(); count loops = numberOfSelfLoops(); m -= loops; double d; if (isDirected()) { d = m / (double) (n * (n-1)); } else { d = (2 * m) / (double) (n * (n-1)); } return d; } /** * Return the number of loops {v,v} in the graph. * @return The number of loops. * @note This involves calculation, so store result if needed multiple times. */ count numberOfSelfLoops() const; /** * Get an upper bound for the node ids in the graph. * @return An upper bound for the node ids. */ index upperNodeIdBound() const { return z; } /** * Check for invalid graph states, such as multi-edges. * @return False if the graph is in invalid state. */ bool checkConsistency() const; /* DYNAMICS */ /** * Trigger a time step - increments counter. */ void timeStep() { t++; } /** * Get time step counter. * @return Time step counter. 
*/ count time() { return t; } /* COORDINATES */ /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Sets the coordinate of @a v to @a value. * * @param v Node. * @param value The coordinate of @a v. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] void setCoordinate(node v, Point<float> value) { coordinates.setCoordinate(v, value); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Get the coordinate of @a v. * @param v Node. * @return The coordinate of @a v. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] Point<float>& getCoordinate(node v) { return coordinates.getCoordinate(v); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Get minimum coordinate of all coordinates with respect to dimension @a dim. * @param dim The dimension to search for minimum. * @return The minimum coordinate in dimension @a dim. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] float minCoordinate(count dim) { return coordinates.minCoordinate(dim); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. * * Get maximum coordinate of all coordinates with respect to dimension @a dim. * @param dim The dimension to search for maximum. * @return The maximum coordinate in dimension @a dim. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] float maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); } /** * DEPRECATED: Coordinates should be handled outside the Graph class * like general node attributes. 
* * Initializes the coordinates for the nodes in graph. * @note This has to be called once and before you set coordinates. Call this method again if new nodes have * been added. */ // TODO: remove method // [[deprecated("Deprecated: Node coordinates should be stored externally like any other node attribute")]] void initCoordinates() { coordinates.init(z); } /* EDGE ATTRIBUTES */ /** * Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist. * BEWARE: Running time is \Theta(deg(u))! * * @param u Endpoint of edge. * @param v Endpoint of edge. * @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist. */ edgeweight weight(node u, node v) const; /** * Set the weight of an edge. If the edge does not exist, * it will be inserted. * * @param[in] u endpoint of edge * @param[in] v endpoint of edge * @param[in] weight edge weight */ void setWeight(node u, node v, edgeweight ew); /** * Increase the weight of an edge. If the edge does not exist, * it will be inserted. * * @param[in] u endpoint of edge * @param[in] v endpoint of edge * @param[in] weight edge weight */ void increaseWeight(node u, node v, edgeweight ew); /* SUMS */ /** * Returns the sum of all edge weights. * @return The sum of all edge weights. */ edgeweight totalEdgeWeight() const; /* Collections */ /** * Get list of all nodes. * @return List of all nodes. */ std::vector<node> nodes() const; /** * Get list of edges as node pairs. * @return List of edges as node pairs. */ std::vector<std::pair<node, node> > edges() const; /** * Get list of neighbors of @a u. * * @param u Node. * @return List of neighbors of @a u. */ std::vector<node> neighbors(node u) const; /** * Get i-th (outgoing) neighbor of @a u. * WARNING: This function is deprecated or only temporary. * * @param u Node. * @param i index; should be in [0, degreeOut(u)) * @return @a i -th (outgoing) neighbor of @a u, or @c none if no such * neighbor exists. 
*/ template<bool graphIsDirected> node getIthNeighbor(node u, index i) const { node v = outEdges[u][i]; if (useEdgeInIteration<graphIsDirected>(u, v)) return v; else return none; } /* Derivative Graphs */ /** * Return an undirected version of this graph. * * @return undirected graph. */ Graph toUndirected() const; /** * Return an unweighted version of this graph. * * @return unweighted graph. */ Graph toUnweighted() const; /** * Return the transpose of this graph. The graph must be directed. * * @return transpose of the graph. */ Graph transpose() const; /* NODE ITERATORS */ /** * Iterate over all nodes of the graph and call @a handle (lambda closure). * * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void forNodes(L handle) const; /** * Iterate randomly over all nodes of the graph and call @a handle (lambda closure). * * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void parallelForNodes(L handle) const; /** Iterate over all nodes of the graph and call @a handle (lambda closure) as long as @a condition remains true. * This allows for breaking from a node loop. * * @param condition Returning <code>false</code> breaks the loop. * @param handle Takes parameter <code>(node)</code>. */ template<typename C, typename L> void forNodesWhile(C condition, L handle) const; /** * Iterate randomly over all nodes of the graph and call @a handle (lambda closure). * * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void forNodesInRandomOrder(L handle) const; /** * Iterate in parallel over all nodes of the graph and call handler (lambda closure). * Using schedule(guided) to remedy load-imbalances due to e.g. unequal degree distribution. * * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void balancedParallelForNodes(L handle) const; /** * Iterate over all undirected pairs of nodes and call @a handle (lambda closure). 
* * @param handle Takes parameters <code>(node, node)</code>. */ template<typename L> void forNodePairs(L handle) const; /** * Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure). * * @param handle Takes parameters <code>(node, node)</code>. */ template<typename L> void parallelForNodePairs(L handle) const; /* EDGE ITERATORS */ /** * Iterate over all edges of the const graph and call @a handle (lambda closure). * * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>. */ template<typename L> void forEdges(L handle) const; /** * Iterate in parallel over all edges of the const graph and call @a handle (lambda closure). * * @param handle Takes parameters <code>(node, node)</code> or <code>(node, node, edgweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>. */ template<typename L> void parallelForEdges(L handle) const; /* NEIGHBORHOOD ITERATORS */ /** * Iterate over all neighbors of a node and call @a handle (lamdba closure). * * @param u Node. * @param handle Takes parameter <code>(node)</code> or <code>(node, edgeweight)</code> which is a neighbor of @a u. * @note For directed graphs only outgoing edges from @a u are considered. * A node is its own neighbor if there is a self-loop. * */ template<typename L> void forNeighborsOf(node u, L handle) const; /** * Iterate over all incident edges of a node and call @a handle (lamdba closure). * * @param u Node. * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code> where the first node is @a u and the second is a neighbor of @a u. * @note For undirected graphs all edges incident to @a u are also outgoing edges. 
*/ template<typename L> void forEdgesOf(node u, L handle) const; /** * Iterate over all neighbors of a node and call handler (lamdba closure). * For directed graphs only incoming edges from u are considered. */ template<typename L> void forInNeighborsOf(node u, L handle) const; /** * Iterate over all incoming edges of a node and call handler (lamdba closure). * @note For undirected graphs all edges incident to u are also incoming edges. * * Handle takes parameters (u, v) or (u, v, w) where w is the edge weight. */ template<typename L> void forInEdgesOf(node u, L handle) const; /* REDUCTION ITERATORS */ /** * Iterate in parallel over all nodes and sum (reduce +) the values returned by the handler */ template<typename L> double parallelSumForNodes(L handle) const; /** * Iterate in parallel over all edges and sum (reduce +) the values returned by the handler */ template<typename L> double parallelSumForEdges(L handle) const; /* GRAPH SEARCHES */ /** * Iterate over nodes in breadth-first search order starting from r until connected component * of r has been visited. * * @param r Node. * @param handle Takes parameter <code>(node)</code>. */ template<typename L> void BFSfrom(node r, L handle) const; template<typename L> void BFSfrom(const std::vector<node> &startNodes, L handle) const; template<typename L> void BFSEdgesFrom(node r, L handle) const; /** * Iterate over nodes in depth-first search order starting from r until connected component * of r has been visited. * * @param r Node. * @param handle Takes parameter <code>(node)</code>. 
*/
template<typename L> void DFSfrom(node r, L handle) const;

template<typename L> void DFSEdgesFrom(node r, L handle) const;

};

/* NODE ITERATORS */

// Sequentially visit every existing node; deleted node slots (exists[v] == false) are skipped.
template<typename L> void Graph::forNodes(L handle) const {
	for (node v = 0; v < z; ++v) {
		if (exists[v]) {
			handle(v);
		}
	}
}

// Parallel variant of forNodes; handle must be safe to call concurrently from OpenMP threads.
template<typename L> void Graph::parallelForNodes(L handle) const {
	#pragma omp parallel for
	for (node v = 0; v < z; ++v) {
		if (exists[v]) {
			handle(v);
		}
	}
}

// Visit nodes in id order until condition() returns false.
// Note: condition is evaluated before each handle(v) call, but only on existing nodes.
template<typename C, typename L> void Graph::forNodesWhile(C condition, L handle) const {
	for (node v = 0; v < z; ++v) {
		if (exists[v]) {
			if (!condition()) {
				break;
			}
			handle(v);
		}
	}
}

// Visit all existing nodes in a uniformly shuffled order (uses the shared Aux RNG;
// deleted nodes are excluded because nodes() only returns existing ones).
template<typename L> void Graph::forNodesInRandomOrder(L handle) const {
	std::vector<node> randVec = nodes();
	std::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG());
	for (node v : randVec) {
		handle(v);
	}
}

// Parallel node iteration with guided scheduling to smooth per-node load imbalance
// (e.g. skewed degree distributions when handle's cost depends on degree).
template<typename L> void Graph::balancedParallelForNodes(L handle) const {
	#pragma omp parallel for schedule(guided) // TODO: define min block size (and test it!)
	for (node v = 0; v < z; ++v) {
		if (exists[v]) {
			handle(v);
		}
	}
}

// Visit every unordered pair {u, v} of existing nodes exactly once (v > u).
template<typename L> void Graph::forNodePairs(L handle) const {
	for (node u = 0; u < z; ++u) {
		if (exists[u]) {
			for (node v = u + 1; v < z; ++v) {
				if (exists[v]) {
					handle(u, v);
				}
			}
		}
	}
}

// Parallel variant of forNodePairs; the outer loop is parallelized, so handle must be thread-safe.
template<typename L> void Graph::parallelForNodePairs(L handle) const {
	#pragma omp parallel for schedule(guided)
	for (node u = 0; u < z; ++u) {
		if (exists[u]) {
			for (node v = u + 1; v < z; ++v) {
				if (exists[v]) {
					handle(u, v);
				}
			}
		}
	}
}

/* EDGE ITERATORS */

/* HELPERS */
// The following primary templates / specializations resolve, at compile time, how to
// read edge weights and edge ids, so the unweighted/unindexed cases cost nothing at runtime.

template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getOutEdgeWeight(node u, index i) const {
	return outEdgeWeights[u][i];
}

template<> // implementation for weighted == false: every edge has the default weight
inline edgeweight Graph::getOutEdgeWeight<false>(node, index) const {
	return defaultEdgeWeight;
}

template<bool hasWeights> // implementation for weighted == true
inline edgeweight Graph::getInEdgeWeight(node u, index i) const {
	return inEdgeWeights[u][i];
}

template<> // implementation for weighted == false
inline edgeweight Graph::getInEdgeWeight<false>(node, index) const {
	return defaultEdgeWeight;
}

template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getOutEdgeId(node u, index i) const {
	return outEdgeIds[u][i];
}

template<> // implementation for hasEdgeIds == false: dummy id 0
inline edgeid Graph::getOutEdgeId<false>(node, index) const {
	return 0;
}

template<bool graphHasEdgeIds> // implementation for hasEdgeIds == true
inline edgeid Graph::getInEdgeId(node u, index i) const {
	return inEdgeIds[u][i];
}

template<> // implementation for hasEdgeIds == false
inline edgeid Graph::getInEdgeId<false>(node, index) const {
	return 0;
}

// Decide whether the adjacency slot (u, v) represents an edge that this traversal should
// report. Directed: every non-deleted slot (v != none). Undirected: only u >= v, so each
// undirected edge is reported once; a deleted slot (v == none, the max node value) also
// fails u >= v and is skipped.
template<bool graphIsDirected> // implementation for graphIsDirected == true
inline bool Graph::useEdgeInIteration(node u, node v) const {
	return v != none;
}

template<> // implementation for graphIsDirected == false
inline bool Graph::useEdgeInIteration<false>(node u, node v) const {
	return u >= v;
}

// Core out-edge loop: for each accepted slot, dispatch to whichever handle signature
// matches via edgeLambda (weight/id arguments are compile-time-resolved above).
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forOutEdgesOfImpl(node u, L handle) const {
	for (index i = 0; i < outEdges[u].size(); ++i) {
		node v = outEdges[u][i];

		if (useEdgeInIteration<graphIsDirected>(u, v)) {
			edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
		}
	}
}

// Core in-edge loop. Directed graphs read the separate inEdges arrays; undirected graphs
// reuse the outEdges arrays (every incident edge is both incoming and outgoing). In both
// branches useEdgeInIteration<true> is used, i.e. only deleted slots (none) are filtered,
// so every incident edge of u is visited exactly once here.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forInEdgesOfImpl(node u, L handle) const {
	if (graphIsDirected) {
		for (index i = 0; i < inEdges[u].size(); i++) {
			node v = inEdges[u][i];
			if (useEdgeInIteration<true>(u, v)) {
				edgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i), getInEdgeId<graphHasEdgeIds>(u, i));
			}
		}
	} else {
		for (index i = 0; i < outEdges[u].size(); ++i) {
			node v = outEdges[u][i];
			if (useEdgeInIteration<true>(u, v)) {
				edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
			}
		}
	}
}

// Sequential whole-graph edge iteration: delegate per node to forOutEdgesOfImpl, whose
// graphIsDirected filter guarantees each (undirected) edge is handled exactly once.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::forEdgeImpl(L handle) const {
	for (node u = 0; u < z; ++u) {
		forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
	}
}

// Parallel whole-graph edge iteration; guided scheduling balances uneven degrees.
// handle must be thread-safe.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline void Graph::parallelForEdgesImpl(L handle) const {
	#pragma omp parallel for schedule(guided)
	for (node u = 0; u < z; ++u) {
		forOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);
	}
}

// Parallel sum-reduction over all edges; edgeLambda's return value is accumulated
// via an OpenMP reduction, so handle must return something convertible to double.
template<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>
inline double Graph::parallelSumForEdgesImpl(L handle) const {
	double sum = 0.0;
	#pragma omp parallel for reduction(+:sum)
	for (node u = 0; u < z; ++u) {
		for (index i = 0; i < outEdges[u].size(); ++i) {
			node v = outEdges[u][i];
			// undirected, do not iterate over edges twice
			// {u, v} instead of (u, v); if v == none, u > v is not fulfilled
			if (useEdgeInIteration<graphIsDirected>(u, v)) {
				sum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));
			}
		}
	}
	return sum;
}

// Dispatch on the three runtime flags, encoded as a 3-bit switch value
// (bit 0 = weighted, bit 1 = directed, bit 2 = edgesIndexed), to the matching
// fully-specialized implementation.
template<typename L>
void Graph::forEdges(L handle) const {
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: // unweighted, undirected, no edgeIds
		forEdgeImpl<false, false, false, L>(handle);
		break;
	case 1: // weighted, undirected, no edgeIds
		forEdgeImpl<false, true, false, L>(handle);
		break;
	case 2: // unweighted, directed, no edgeIds
		forEdgeImpl<true, false, false, L>(handle);
		break;
	case 3: // weighted, directed, no edgeIds
		forEdgeImpl<true, true, false, L>(handle);
		break;
	case 4: // unweighted, undirected, with edgeIds
		forEdgeImpl<false, false, true, L>(handle);
		break;
	case 5: // weighted, undirected, with edgeIds
		forEdgeImpl<false, true, true, L>(handle);
		break;
	case 6: // unweighted, directed, with edgeIds
		forEdgeImpl<true, false, true, L>(handle);
		break;
	case 7: // weighted, directed, with edgeIds
		forEdgeImpl<true, true, true, L>(handle);
		break;
	}
}

// Same 3-bit dispatch as forEdges, but into the OpenMP-parallel implementation.
template<typename L>
void Graph::parallelForEdges(L handle) const {
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: // unweighted, undirected, no edgeIds
		parallelForEdgesImpl<false, false, false, L>(handle);
		break;
	case 1: // weighted, undirected, no edgeIds
		parallelForEdgesImpl<false, true, false, L>(handle);
		break;
	case 2: // unweighted, directed, no edgeIds
		parallelForEdgesImpl<true, false, false, L>(handle);
		break;
	case 3: // weighted, directed, no edgeIds
		parallelForEdgesImpl<true, true, false, L>(handle);
		break;
	case 4: // unweighted, undirected, with edgeIds
		parallelForEdgesImpl<false, false, true, L>(handle);
		break;
	case 5: // weighted, undirected, with edgeIds
		parallelForEdgesImpl<false, true, true, L>(handle);
		break;
	case 6: // unweighted, directed, with edgeIds
		parallelForEdgesImpl<true, false, true, L>(handle);
		break;
	case 7: // weighted, directed, with edgeIds
		parallelForEdgesImpl<true, true, true, L>(handle);
		break;
	}
}

/* NEIGHBORHOOD ITERATORS */

// Neighbor iteration is edge iteration over u's (outgoing) incident edges.
template<typename L>
void Graph::forNeighborsOf(node u, L handle) const {
	forEdgesOf(u, handle);
}

// Iterate over all edges incident to u. graphIsDirected is hard-wired to true here so
// that useEdgeInIteration<true> only filters deleted slots (v == none) — i.e. every
// incident edge is visited, for directed AND undirected graphs alike. Only the
// weighted/edgesIndexed flags need runtime dispatch (2-bit switch).
template<typename L>
void Graph::forEdgesOf(node u, L handle) const {
	switch (weighted + 2 * edgesIndexed) {
	case 0: // not weighted, no edge ids
		forOutEdgesOfImpl<true, false, false, L>(u, handle);
		break;
	case 1: // weighted, no edge ids
		forOutEdgesOfImpl<true, true, false, L>(u, handle);
		break;
	case 2: // not weighted, with edge ids
		forOutEdgesOfImpl<true, false, true, L>(u, handle);
		break;
	case 3: // weighted, with edge ids
		forOutEdgesOfImpl<true, true, true, L>(u, handle);
		break;
	}
}

// In-neighbor iteration is in-edge iteration.
template<typename L>
void Graph::forInNeighborsOf(node u, L handle) const {
	forInEdgesOf(u, handle);
}

// Iterate over all incoming edges of u (all incident edges when undirected);
// same 3-bit flag dispatch as forEdges.
template<typename L>
void Graph::forInEdgesOf(node u, L handle) const {
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: // unweighted, undirected, no edge ids
		forInEdgesOfImpl<false, false, false, L>(u, handle);
		break;
	case 1: // weighted, undirected, no edge ids
		forInEdgesOfImpl<false, true, false, L>(u, handle);
		break;
	case 2: // unweighted, directed, no edge ids
		forInEdgesOfImpl<true, false, false, L>(u, handle);
		break;
	case 3: // weighted, directed, no edge ids
		forInEdgesOfImpl<true, true, false, L>(u, handle);
		break;
	case 4: // unweighted, undirected, with edge ids
		forInEdgesOfImpl<false, false, true, L>(u, handle);
		break;
	case 5: // weighted, undirected, with edge ids
		forInEdgesOfImpl<false, true, true, L>(u, handle);
		break;
	case 6: // unweighted, directed, with edge ids
		forInEdgesOfImpl<true, false, true, L>(u, handle);
		break;
	case 7: // weighted, directed, with edge ids
		forInEdgesOfImpl<true, true, true, L>(u, handle);
		break;
	}
}

/* REDUCTION ITERATORS */

// Sum handle(v) over all existing nodes, in parallel via OpenMP reduction.
template<typename L>
double Graph::parallelSumForNodes(L handle) const {
	double sum = 0.0;
	#pragma omp parallel for reduction(+:sum)
	for (node v = 0; v < z; ++v) {
		if (exists[v]) {
			sum += handle(v);
		}
	}
	return sum;
}

// Sum handle(...) over all edges, in parallel; 3-bit flag dispatch as in forEdges.
template<typename L>
double Graph::parallelSumForEdges(L handle) const {
	double sum = 0.0;
	switch (weighted + 2 * directed + 4 * edgesIndexed) {
	case 0: // unweighted, undirected, no edge ids
		sum = parallelSumForEdgesImpl<false, false, false, L>(handle);
		break;
	case 1: // weighted, undirected, no edge ids
		sum = parallelSumForEdgesImpl<false, true, false, L>(handle);
		break;
	case 2: // unweighted, directed, no edge ids
		sum = parallelSumForEdgesImpl<true, false, false, L>(handle);
		break;
	case 3: // weighted, directed, no edge ids
		sum = parallelSumForEdgesImpl<true, true, false, L>(handle);
		break;
	case 4: // unweighted, undirected, with edge ids
		sum = parallelSumForEdgesImpl<false, false, true, L>(handle);
		break;
	case 5: // weighted, undirected, with edge ids
		sum = parallelSumForEdgesImpl<false, true, true, L>(handle);
		break;
	case 6: // unweighted, directed, with edge ids
		sum = parallelSumForEdgesImpl<true, false, true, L>(handle);
		break;
	case 7: // weighted, directed, with edge ids
		sum = parallelSumForEdgesImpl<true, true, true, L>(handle);
		break;
	}
	return sum;
}

/* GRAPH SEARCHES */

// Single-source BFS is the multi-source BFS with one start node.
template<typename L>
void Graph::BFSfrom(node r, L handle) const {
	std::vector<node> startNodes(1, r);
	BFSfrom(startNodes, handle);
}

// Multi-source BFS over the connected component(s) of the start nodes. Uses two queues:
// q holds the current BFS level, qNext the next one; dist is incremented on each swap,
// so callBFSHandle can pass the level distance to handles that accept it.
// NOTE(review): assumes startNodes is non-empty and all ids are < z — q.front() in the
// do-while is undefined on an empty queue, and marked[u] would be out of bounds; confirm
// callers guarantee this.
template<typename L>
void Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const {
	std::vector<bool> marked(z);
	std::queue<node> q, qNext;
	count dist = 0;
	// enqueue start nodes
	for (node u : startNodes) {
		q.push(u);
		marked[u] = true;
	}
	do {
		node u = q.front();
		q.pop();
		// apply function
		callBFSHandle(handle, u, dist);
		forNeighborsOf(u, [&](node v) {
			if (!marked[v]) {
				qNext.push(v);
				marked[v] = true;
			}
		});
		if (q.empty() && !qNext.empty()) {
			q.swap(qNext);
			++dist;
		}
	} while (!q.empty());
}

// BFS from r that reports tree edges (u, v, weight, id) — i.e. only the edge through
// which each node is first discovered. NOTE(review): assumes r < z; see BFSfrom.
template<typename L>
void Graph::BFSEdgesFrom(node r, L handle) const {
	std::vector<bool> marked(z);
	std::queue<node> q;
	q.push(r); // enqueue root
	marked[r] = true;
	do {
		node u = q.front();
		q.pop();
		// apply function
		forNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) {
			if (!marked[v]) {
				handle(u, v, w, eid);
				q.push(v);
				marked[v] = true;
			}
		});
	} while (!q.empty());
}

// Iterative DFS from r; handle(u) is called when u is popped from the stack.
// Nodes are marked when pushed, so each node is visited exactly once.
template<typename L>
void Graph::DFSfrom(node r, L handle) const {
	std::vector<bool> marked(z);
	std::stack<node> s;
	s.push(r); // enqueue root
	marked[r] = true;
	do {
		node u = s.top();
		s.pop();
		// apply function
		handle(u);
		forNeighborsOf(u, [&](node v) {
			if (!marked[v]) {
				s.push(v);
				marked[v] = true;
			}
		});
	} while (!s.empty());
}

// DFS from r that reports discovery edges (u, v), analogous to BFSEdgesFrom.
template<typename L>
void Graph::DFSEdgesFrom(node r, L handle) const {
	std::vector<bool> marked(z);
	std::stack<node> s;
	s.push(r); // enqueue root
	marked[r] = true;
	do {
		node u = s.top();
		s.pop();
		// apply function
		forNeighborsOf(u, [&](node v) {
			if (!marked[v]) {
				handle(u, v);
				s.push(v);
				marked[v] = true;
			}
		});
	} while (!s.empty());
}

} /* namespace NetworKit */

#endif /* GRAPH_H_ */
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % John Cristy % % October 1996 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/morphology.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. 
% % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *AdaptiveBlurImageChannel(const Image *image, % const ChannelType channel,double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *AdaptiveBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) <= MagickEpsilon ? 
1.0 : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  /*
    Validate arguments (standard MagickCore entry contract).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return the unmodified clone.
  */
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge image selects, per pixel, how much blur to apply.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) LevelImage(edge_image,"20%,95%");
  gaussian_image=GaussianBlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      /* replace the edge image with its smoothed version */
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) LevelImage(edge_image,"10%,95%");
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
*/ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) AcquireQuantumMemory((size_t) width,sizeof(*kernel)); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) AcquireQuantumMemory((size_t) (width-i),(width-i)* sizeof(**kernel)); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } if (fabs(normalize) <= MagickEpsilon) normalize=1.0; normalize=1.0/normalize; for (k=0; k < (j*j); k++) kernel[i][k]=normalize*kernel[i][k]; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishMagickMemory(kernel[i]); kernel=(double **) RelinquishMagickMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireCacheView(image); edge_view=AcquireCacheView(edge_image); blur_view=AcquireCacheView(blur_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p, *restrict r; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) blur_image->columns; x++) { MagickPixelPacket pixel; MagickRealType alpha, gamma; register const double *restrict k; register ssize_t i, u, v; gamma=0.0; i=(ssize_t) ceil((double) width*QuantumScale*PixelIntensity(r)-0.5); if (i < 0) i=0; else if (i > (ssize_t) width) i=(ssize_t) width; if ((i & 0x01) != 0) i--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y- (ssize_t) ((width-i)/2L),width-i,width-i,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=bias; k=kernel[i]; for (v=0; v < (ssize_t) (width-i); v++) { for (u=0; u < (ssize_t) (width-i); u++) { alpha=1.0; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); if ((channel & RedChannel) != 0) pixel.red+=(*k)*alpha*GetPixelRed(p); if ((channel & GreenChannel) != 0) pixel.green+=(*k)*alpha*GetPixelGreen(p); if ((channel & BlueChannel) != 0) pixel.blue+=(*k)*alpha*GetPixelBlue(p); if ((channel & OpacityChannel) != 
0) pixel.opacity+=(*k)*GetPixelOpacity(p); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u); gamma+=(*k)*alpha; k++; p++; } } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); q++; r++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveBlurImageChannel) #endif proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishMagickMemory(kernel[i]); kernel=(double **) RelinquishMagickMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from edges. 
We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *AdaptiveSharpenImageChannel(const Image *image, % const ChannelType channel,double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *sharp_image; sharp_image=AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma, exception); return(sharp_image); } MagickExport Image *AdaptiveSharpenImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) <= MagickEpsilon ? 
1.0 : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  /*
    Validate arguments (standard MagickCore entry contract).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return the unmodified clone.
  */
  if (fabs(sigma) <= MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sharp_image->exception);
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The resulting edge image selects, per pixel, how much sharpening to apply.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) LevelImage(edge_image,"20%,95%");
  gaussian_image=GaussianBlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      /* replace the edge image with its smoothed version */
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) LevelImage(edge_image,"10%,95%");
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
*/ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) AcquireQuantumMemory((size_t) width,sizeof(*kernel)); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) AcquireQuantumMemory((size_t) (width-i),(width-i)* sizeof(**kernel)); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } if (fabs(normalize) <= MagickEpsilon) normalize=1.0; normalize=1.0/normalize; for (k=0; k < (j*j); k++) kernel[i][k]=normalize*kernel[i][k]; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishMagickMemory(kernel[i]); kernel=(double **) RelinquishMagickMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireCacheView(image); edge_view=AcquireCacheView(edge_image); sharp_view=AcquireCacheView(sharp_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p, *restrict r; register IndexPacket *restrict sharp_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view); for (x=0; x < (ssize_t) sharp_image->columns; x++) { MagickPixelPacket pixel; MagickRealType alpha, gamma; register const double *restrict k; register ssize_t i, u, v; gamma=0.0; i=(ssize_t) ceil((double) width*(QuantumRange-QuantumScale* PixelIntensity(r))-0.5); if (i < 0) i=0; else if (i > (ssize_t) width) i=(ssize_t) width; if ((i & 0x01) != 0) i--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y- (ssize_t) ((width-i)/2L),width-i,width-i,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); k=kernel[i]; pixel=bias; for (v=0; v < (ssize_t) (width-i); v++) { for (u=0; u < (ssize_t) (width-i); u++) { alpha=1.0; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); if ((channel & RedChannel) != 0) pixel.red+=(*k)*alpha*GetPixelRed(p); if ((channel & GreenChannel) != 0) pixel.green+=(*k)*alpha*GetPixelGreen(p); if ((channel & BlueChannel) != 0) pixel.blue+=(*k)*alpha*GetPixelBlue(p); if 
((channel & OpacityChannel) != 0) pixel.opacity+=(*k)*GetPixelOpacity(p); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u); gamma+=(*k)*alpha; k++; p++; } } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index)); q++; r++; } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveSharpenImageChannel) #endif proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishMagickMemory(kernel[i]); kernel=(double **) RelinquishMagickMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). 
For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % BlurImage() differs from GaussianBlurImage() in that it uses a separable % kernel which is faster but mathematically equivalent to the non-separable % kernel. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *BlurImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=BlurImageChannel(image,DefaultChannels,radius,sigma,exception); return(blur_image); } static double *GetBlurKernel(const size_t width,const double sigma) { double *kernel, normalize; register ssize_t i; ssize_t j, k; /* Generate a 1-D convolution kernel. 
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(double *) AcquireQuantumMemory((size_t) width,sizeof(*kernel)); if (kernel == (double *) NULL) return(0); normalize=0.0; j=(ssize_t) width/2; i=0; for (k=(-j); k <= j; k++) { kernel[i]=(double) (exp(-((double) k*k)/(2.0*MagickSigma*MagickSigma))/ (MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; i++; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *BlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double *kernel; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t x, y; /* Initialize blur image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) <= MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetBlurKernel(width,sigma); if (kernel == (double *) NULL) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " BlurImage with %.20g kernel:",(double) width); message=AcquireString(""); k=kernel; for (i=0; i < (ssize_t) width; i++) { 
*message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) i); (void) ConcatenateString(&message,format); (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++); (void) ConcatenateString(&message,format); (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } /* Blur rows. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireCacheView(image); blur_view=AcquireCacheView(blur_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y, image->columns+width,1,exception); q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) blur_image->columns; x++) { MagickPixelPacket pixel; register const double *restrict k; register const PixelPacket *restrict kernel_pixels; register ssize_t i; pixel=bias; k=kernel; kernel_pixels=p; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) width; i++) { pixel.red+=(*k)*GetPixelRed(kernel_pixels); pixel.green+=(*k)*GetPixelGreen(kernel_pixels); pixel.blue+=(*k)*GetPixelBlue(kernel_pixels); k++; kernel_pixels++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) 
SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) { k=kernel; kernel_pixels=p; for (i=0; i < (ssize_t) width; i++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels); k++; kernel_pixels++; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=kernel; kernel_indexes=indexes; for (i=0; i < (ssize_t) width; i++) { pixel.index+=(*k)*GetPixelIndex(kernel_indexes); k++; kernel_indexes++; } SetPixelIndex(blur_indexes+x,ClampToQuantum(pixel.index)); } } else { MagickRealType alpha, gamma; gamma=0.0; for (i=0; i < (ssize_t) width; i++) { alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(kernel_pixels)); pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels); pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels); pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels); gamma+=(*k)*alpha; k++; kernel_pixels++; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) { k=kernel; kernel_pixels=p; for (i=0; i < (ssize_t) width; i++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels); k++; kernel_pixels++; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=kernel; kernel_pixels=p; kernel_indexes=indexes; for (i=0; i < (ssize_t) width; i++) { alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(kernel_pixels)); pixel.index+=(*k)*alpha*(*kernel_indexes); k++; kernel_pixels++; kernel_indexes++; } SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); } } indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BlurImageChannel) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,blur_image->rows+ blur_image->columns); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); /* Blur columns. 
*/ image_view=AcquireCacheView(blur_image); blur_view=AcquireCacheView(blur_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (x=0; x < (ssize_t) blur_image->columns; x++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t y; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,x,-((ssize_t) width/2L),1, image->rows+width,exception); q=GetCacheViewAuthenticPixels(blur_view,x,0,1,blur_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (y=0; y < (ssize_t) blur_image->rows; y++) { MagickPixelPacket pixel; register const double *restrict k; register const PixelPacket *restrict kernel_pixels; register ssize_t i; pixel=bias; k=kernel; kernel_pixels=p; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) width; i++) { pixel.red+=(*k)*GetPixelRed(kernel_pixels); pixel.green+=(*k)*GetPixelGreen(kernel_pixels); pixel.blue+=(*k)*GetPixelBlue(kernel_pixels); k++; kernel_pixels++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) { k=kernel; kernel_pixels=p; for (i=0; i < (ssize_t) width; i++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels); k++; kernel_pixels++; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=kernel; kernel_indexes=indexes; for (i=0; i < 
(ssize_t) width; i++) { pixel.index+=(*k)*GetPixelIndex(kernel_indexes); k++; kernel_indexes++; } SetPixelIndex(blur_indexes+y,ClampToQuantum(pixel.index)); } } else { MagickRealType alpha, gamma; gamma=0.0; for (i=0; i < (ssize_t) width; i++) { alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(kernel_pixels)); pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels); pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels); pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels); gamma+=(*k)*alpha; k++; kernel_pixels++; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) { k=kernel; kernel_pixels=p; for (i=0; i < (ssize_t) width; i++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels); k++; kernel_pixels++; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=kernel; kernel_pixels=p; kernel_indexes=indexes; for (i=0; i < (ssize_t) width; i++) { alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(kernel_pixels)); pixel.index+=(*k)*alpha*(*kernel_indexes); k++; kernel_pixels++; kernel_indexes++; } SetPixelIndex(blur_indexes+y,ClampToQuantum(gamma*pixel.index)); } } indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BlurImageChannel) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,blur_image->rows+ blur_image->columns); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); 
image_view=DestroyCacheView(image_view); kernel=(double *) RelinquishMagickMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); blur_image->type=image->type; return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const size_t order, % const double *kernel,ExceptionInfo *exception) % Image *ConvolveImageChannel(const Image *image,const ChannelType channel, % const size_t order,const double *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o order: the number of columns and rows in the filter kernel. % % o kernel: An array of double representing the convolution kernel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image,const size_t order, const double *kernel,ExceptionInfo *exception) { Image *convolve_image; convolve_image=ConvolveImageChannel(image,DefaultChannels,order,kernel, exception); return(convolve_image); } MagickExport Image *ConvolveImageChannel(const Image *image, const ChannelType channel,const size_t order,const double *kernel, ExceptionInfo *exception) { #define ConvolveImageTag "Convolve/Image" CacheView *convolve_view, *image_view; double *normal_kernel; Image *convolve_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType gamma; register ssize_t i; size_t width; ssize_t y; /* Initialize convolve image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=order; if ((width % 2) == 0) ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber"); convolve_image=CloneImage(image,0,0,MagickTrue,exception); if (convolve_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(convolve_image,DirectClass) == MagickFalse) { InheritException(exception,&convolve_image->exception); convolve_image=DestroyImage(convolve_image); return((Image *) NULL); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " ConvolveImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } /* Normalize kernel. */ normal_kernel=(double *) AcquireQuantumMemory(width*width, sizeof(*normal_kernel)); if (normal_kernel == (double *) NULL) { convolve_image=DestroyImage(convolve_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } gamma=0.0; for (i=0; i < (ssize_t) (width*width); i++) gamma+=kernel[i]; gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); for (i=0; i < (ssize_t) (width*width); i++) normal_kernel[i]=gamma*kernel[i]; /* Convolve image. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireCacheView(image); convolve_view=AcquireCacheView(convolve_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict convolve_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),image->columns+width,width,exception); q=GetCacheViewAuthenticPixels(convolve_view,0,y,convolve_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); convolve_indexes=GetCacheViewAuthenticIndexQueue(convolve_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket pixel; register const double *restrict k; register const PixelPacket *restrict kernel_pixels; register ssize_t u; ssize_t v; pixel=bias; k=normal_kernel; kernel_pixels=p; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { pixel.red+=(*k)*kernel_pixels[u].red; pixel.green+=(*k)*kernel_pixels[u].green; pixel.blue+=(*k)*kernel_pixels[u].blue; k++; } kernel_pixels+=image->columns+width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) { k=normal_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { 
pixel.opacity+=(*k)*kernel_pixels[u].opacity; k++; } kernel_pixels+=image->columns+width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=normal_kernel; kernel_indexes=indexes; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u); k++; } kernel_indexes+=image->columns+width; } SetPixelIndex(convolve_indexes+x,ClampToQuantum(pixel.index)); } } else { MagickRealType alpha, gamma; gamma=0.0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- kernel_pixels[u].opacity)); pixel.red+=(*k)*alpha*kernel_pixels[u].red; pixel.green+=(*k)*alpha*kernel_pixels[u].green; pixel.blue+=(*k)*alpha*kernel_pixels[u].blue; gamma+=(*k)*alpha; k++; } kernel_pixels+=image->columns+width; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) { k=normal_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u); k++; } kernel_pixels+=image->columns+width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=normal_kernel; kernel_pixels=p; kernel_indexes=indexes; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- kernel_pixels[u].opacity)); pixel.index+=(*k)*alpha*GetPixelIndex( kernel_indexes+u); k++; } kernel_pixels+=image->columns+width; kernel_indexes+=image->columns+width; } SetPixelIndex(convolve_indexes+x,ClampToQuantum(gamma* pixel.index)); } } indexes++; p++; q++; } sync=SyncCacheViewAuthenticPixels(convolve_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ConvolveImageChannel) #endif proceed=SetImageProgress(image,ConvolveImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } convolve_image->type=image->type; convolve_view=DestroyCacheView(convolve_view); image_view=DestroyCacheView(image_view); normal_kernel=(double *) RelinquishMagickMemory(normal_kernel); if (status == MagickFalse) convolve_image=DestroyImage(convolve_image); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull(): one sweep of the despeckle algorithm over a single channel plane.
  f is the source plane and g the scratch plane; both are laid out as
  (columns+2) x (rows+2) with a one-pixel guard border.  (x_offset,y_offset)
  selects which neighbor each pixel is compared against, and polarity selects
  whether a pixel is nudged up (polarity > 0) or down by one scaled unit when
  the neighbor differs by at least two scaled units.  The first pass writes
  f -> g using the offset neighbor r; the second pass writes g -> f using
  both the offset neighbor s and its mirror r.
*/
static void Hull(const ssize_t x_offset,const ssize_t y_offset,
  const size_t columns,const size_t rows,Quantum *f,Quantum *g,
  const int polarity)
{
  MagickRealType
    v;

  register Quantum
    *p,
    *q,
    *r,
    *s;

  register ssize_t
    x;

  ssize_t
    y;

  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* skip the top guard row; r points at the offset neighbor of p */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
  for (y=0; y < (ssize_t) rows; y++)
  {
    p++;
    q++;
    r++;
    if (polarity > 0)
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*p);
        if ((MagickRealType) *r >= (v+(MagickRealType) ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        *q=(Quantum) v;
        p++;
        q++;
        r++;
      }
    else
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*p);
        if ((MagickRealType) *r <= (v-(MagickRealType) ScaleCharToQuantum(2)))
          v-=(ssize_t) ScaleCharToQuantum(1);
        *q=(Quantum) v;
        p++;
        q++;
        r++;
      }
    p++;
    q++;
    r++;
  }
  /* second pass: write back g -> f, testing both s and its mirror r */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
  for (y=0; y < (ssize_t) rows; y++)
  {
    p++;
    q++;
    r++;
    s++;
    if (polarity > 0)
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*q);
        if (((MagickRealType) *s >=
             (v+(MagickRealType) ScaleCharToQuantum(2))) &&
            ((MagickRealType) *r > v))
          v+=ScaleCharToQuantum(1);
        *p=(Quantum) v;
        p++;
        q++;
        r++;
        s++;
      }
    else
      for (x=(ssize_t) columns; x != 0; x--)
      {
        v=(MagickRealType) (*q);
        if (((MagickRealType) *s <=
             (v-(MagickRealType) ScaleCharToQuantum(2))) &&
            ((MagickRealType) *r < v))
          v-=(MagickRealType) ScaleCharToQuantum(1);
        *p=(Quantum) v;
        p++;
        q++;
        r++;
        s++;
      }
    p++;
    q++;
    r++;
    s++;
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  Quantum
    *restrict buffers,
    *restrict pixels;

  size_t
    length,
    number_channels;

  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  despeckle_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffers: one padded plane each for the pixel data and the
    Hull() scratch area.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixels=(Quantum *) AcquireQuantumMemory(length,2*sizeof(*pixels));
  buffers=(Quantum *) AcquireQuantumMemory(length,2*sizeof(*pixels));
  if ((pixels == (Quantum *) NULL) || (buffers == (Quantum *) NULL))
    {
      if (buffers != (Quantum *) NULL)
        buffers=(Quantum *) RelinquishMagickMemory(buffers);
      if (pixels != (Quantum *) NULL)
        pixels=(Quantum *) RelinquishMagickMemory(pixels);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Reduce speckle in the image, one channel at a time (RGBA, plus black for
    CMYK).
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireCacheView(image);
  despeckle_view=AcquireCacheView(despeckle_image);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    register Quantum
      *buffer,
      *pixel;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    /* copy channel i into the padded plane */
    pixel=pixels;
    (void) ResetMagickMemory(pixel,0,length*sizeof(*pixel));
    buffer=buffers;
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *restrict indexes;

      register const PixelPacket
        *restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixel[j]=GetPixelRed(p); break;
          case 1: pixel[j]=GetPixelGreen(p); break;
          case 2: pixel[j]=GetPixelBlue(p); break;
          case 3: pixel[j]=GetPixelOpacity(p); break;
          case 4: pixel[j]=GetPixelBlack(indexes+x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    /* four hull sweeps in each of the four offset directions */
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(X[k],Y[k],image->columns,image->rows,pixel,buffer,1);
      Hull(-X[k],-Y[k],image->columns,image->rows,pixel,buffer,1);
      Hull(-X[k],-Y[k],image->columns,image->rows,pixel,buffer,-1);
      Hull(X[k],Y[k],image->columns,image->rows,pixel,buffer,-1);
    }
    /* write the despeckled channel back into the result image */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *restrict indexes;

      register PixelPacket
        *restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,
        despeckle_image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      /*
        Bug fix: take the authentic index queue from the view being written
        (despeckle_view), not from the read-only source view.
      */
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: SetPixelRed(q,pixel[j]); break;
          case 1: SetPixelGreen(q,pixel[j]); break;
          case 2: SetPixelBlue(q,pixel[j]); break;
          case 3: SetPixelOpacity(q,pixel[j]); break;
          case 4: SetPixelIndex(indexes+x,pixel[j]); break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffers=(Quantum *) RelinquishMagickMemory(buffers);
  pixels=(Quantum *) RelinquishMagickMemory(pixels);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
% */

MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *edge_image;

  size_t
    width;

  ssize_t
    cell,
    cells;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Build a width x width Laplacian-style kernel: every cell is -1.0 except
    the center, which is set so the kernel sums to zero.
  */
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  cells=(ssize_t) (width*width);
  for (cell=0; cell < cells; cell++)
    kernel[cell]=(-1.0);
  kernel[cells/2]=(double) (width*width-1.0);
  edge_image=ConvolveImage(image,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).  For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */

MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *emboss_image;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Build a width x width emboss kernel: Gaussian-weighted values, negative
    in the upper-left half and positive elsewhere.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) width/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel[i]=(double) (((u < 0) || (v < 0) ? -8.0 : 8.0)*
        exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      /* zero every cell off the k-diagonal, keeping one tap per row */
      if (u != k)
        kernel[i]=0.0;
      i++;
    }
    k--;  /* shift the surviving column one step left on each row */
  }
  emboss_image=ConvolveImage(image,width,kernel,exception);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F i l t e r I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FilterImage() applies a custom convolution kernel to the image.
%
%  The format of the FilterImage method is:
%
%      Image *FilterImage(const Image *image,const KernelInfo *kernel,
%        ExceptionInfo *exception)
%      Image *FilterImageChannel(const Image *image,const ChannelType channel,
%        const KernelInfo *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o kernel: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel, ExceptionInfo *exception) { Image *filter_image; filter_image=FilterImageChannel(image,DefaultChannels,kernel,exception); return(filter_image); } MagickExport Image *FilterImageChannel(const Image *image, const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception) { #define FilterImageTag "Filter/Image" CacheView *filter_view, *image_view; Image *filter_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; ssize_t y; /* Initialize filter image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((kernel->width % 2) == 0) ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber"); filter_image=CloneImage(image,0,0,MagickTrue,exception); if (filter_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse) { InheritException(exception,&filter_image->exception); filter_image=DestroyImage(filter_image); return((Image *) NULL); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double) kernel->height); message=AcquireString(""); k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) kernel->width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } 
message=DestroyString(message); } status=AccelerateConvolveImage(image,kernel,filter_image,exception); if (status == MagickTrue) return(filter_image); /* Filter image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireCacheView(image); filter_view=AcquireCacheView(filter_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict filter_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel->width/2L), y-(ssize_t) (kernel->height/2L),image->columns+kernel->width, kernel->height,exception); q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket pixel; register const double *restrict k; register const PixelPacket *restrict kernel_pixels; register ssize_t u; ssize_t v; pixel=bias; k=kernel->values; kernel_pixels=p; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.red+=(*k)*kernel_pixels[u].red; pixel.green+=(*k)*kernel_pixels[u].green; pixel.blue+=(*k)*kernel_pixels[u].blue; k++; } kernel_pixels+=image->columns+kernel->width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) 
SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) { k=kernel->values; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*kernel_pixels[u].opacity; k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=kernel->values; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u); k++; } kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index)); } } else { MagickRealType alpha, gamma; gamma=0.0; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- GetPixelOpacity(kernel_pixels+u))); pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u); pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u); pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u); gamma+=(*k)*alpha; k++; } kernel_pixels+=image->columns+kernel->width; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) { k=kernel->values; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u); k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=kernel->values; kernel_pixels=p; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- kernel_pixels[u].opacity)); pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u); k++; } kernel_pixels+=image->columns+kernel->width; kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index)); } } indexes++; p++; q++; } sync=SyncCacheViewAuthenticPixels(filter_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FilterImageChannel) #endif proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } filter_image->type=image->type; filter_view=DestroyCacheView(filter_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) filter_image=DestroyImage(filter_image); return(filter_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a u s s i a n B l u r I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *GaussianBlurImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* convenience wrapper: blur all default channels */
  blur_image=GaussianBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(blur_image);
}

MagickExport Image *GaussianBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    *kernel;

  Image
    *blur_image;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Build a width x width 2-D Gaussian kernel and hand it to the generic
    convolution routine.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) width/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  blur_image=ConvolveImageChannel(image,channel,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%      Image *MotionBlurImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%      Image *MotionBlurImageChannel(const Image *image,const ChannelType channel,
%        const double radius,const double sigma,const double angle,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static double *GetMotionBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    normalize;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel: a half-Gaussian of the given width,
    normalized so the weights sum to 1.  Returns NULL on allocation failure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) AcquireQuantumMemory((size_t) width,sizeof(*kernel));
  if (kernel == (double *) NULL)
    return(kernel);
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  Image
    *motion_blur;

  /* convenience wrapper: blur all default channels */
  motion_blur=MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle,
    exception);
  return(motion_blur);
}

MagickExport Image *MotionBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  double
    *kernel;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishMagickMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Precompute the per-tap sample offsets along the motion direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-
      0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-
      0.5);
  }
  /*
    Motion blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireCacheView(image);
  blur_view=AcquireCacheView(blur_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict blur_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      PixelPacket
        pixel;

      register const IndexPacket
        *restrict indexes;

      register double
        *restrict k;

      register ssize_t
        i;

      k=kernel;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /* accumulate weighted samples along the motion direction */
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            qixel.red+=(*k)*pixel.red;
            qixel.green+=(*k)*pixel.green;
            qixel.blue+=(*k)*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*(*indexes);
              }
            k++;
          }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index));
        }
      else
        {
          MagickRealType
            alpha,
            gamma;

          /* alpha-weighted accumulation, renormalized by total weight */
          alpha=0.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) width; i++)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
              offset[i].y,&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
            qixel.red+=(*k)*alpha*pixel.red;
            qixel.green+=(*k)*alpha*pixel.green;
            qixel.blue+=(*k)*alpha*pixel.blue;
            qixel.opacity+=(*k)*pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*k)*alpha*GetPixelIndex(indexes);
              }
            gamma+=(*k)*alpha;
            k++;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_MotionBlurImageChannel)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  kernel=(double *) RelinquishMagickMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MaxTextExtent], label[MaxTextExtent]; double degrees, gamma, percentage, radius, sigma, threshold; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel); if (i == (NumberTiles/2)) { (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g", degrees,2.0*degrees); break; } case RollPreview: { x=(ssize_t) 
((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g", 2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"100,%g", 2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImageChannel(preview_image,DefaultChannels,gamma); (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue); (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse); (void) 
FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius, (size_t) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MaxTextExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MaxTextExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MaxTextExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MaxTextExtent); break; } case 4: { (void) CopyMagickString(factor,"laplacian",MaxTextExtent); break; } case 5: { (void) CopyMagickString(factor,"Poisson",MaxTextExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent); break; } } 
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail, (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0); (void) FormatLocaleString(label,MaxTextExtent,"threshold %g", (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange* percentage/100.0); (void) FormatLocaleString(label,MaxTextExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g", degrees,degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=i/2; geometry.y=i/2; (void) RaiseImage(preview_image,&geometry,MagickTrue); (void) 
FormatLocaleString(label,MaxTextExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,RGBColorspace,MagickFalse,threshold, threshold); (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g", 0.5*degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MaxTextExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MaxTextExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image); if (status != MagickFalse) { Image *quality_image; (void) 
CopyMagickString(preview_info->filename, preview_image->filename,MaxTextExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MaxTextExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ", factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. 
*/ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. */ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a d i a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RadialBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RadialBlurImage method is: % % Image *RadialBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % Image *RadialBlurImageChannel(const Image *image,const ChannelType channel, % const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o angle: the angle of the radial blur. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *RadialBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { Image *blur_image; blur_image=RadialBlurImageChannel(image,DefaultChannels,angle,exception); return(blur_image); } MagickExport Image *RadialBlurImageChannel(const Image *image, const ChannelType channel,const double angle,ExceptionInfo *exception) { CacheView *blur_view, *image_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType blur_radius, *cos_theta, offset, *sin_theta, theta; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) image->columns/2.0; blur_center.y=(double) image->rows/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(MagickRealType) (n-1); cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (MagickRealType *) NULL) || (sin_theta == (MagickRealType *) NULL)) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(MagickRealType) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) 
(theta*i-offset)); } /* Radial blur image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireCacheView(image); blur_view=AcquireCacheView(blur_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const IndexPacket *restrict indexes; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) blur_image->columns; x++) { MagickPixelPacket qixel; MagickRealType normalize, radius; PixelPacket pixel; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } normalize=0.0; qixel=bias; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) n; i+=(ssize_t) step) { (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5), (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y* cos_theta[i]+0.5),&pixel,exception); qixel.red+=pixel.red; qixel.green+=pixel.green; qixel.blue+=pixel.blue; qixel.opacity+=pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*indexes); } normalize+=1.0; } normalize=1.0/(fabs((double) normalize) <= MagickEpsilon ? 
1.0 : normalize); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(normalize*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(normalize*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index)); } else { MagickRealType alpha, gamma; alpha=1.0; gamma=0.0; for (i=0; i < (ssize_t) n; i+=(ssize_t) step) { (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5), (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y* cos_theta[i]+0.5),&pixel,exception); alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(&pixel)); qixel.red+=alpha*pixel.red; qixel.green+=alpha*pixel.green; qixel.blue+=alpha*pixel.blue; qixel.opacity+=pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=alpha*(*indexes); } gamma+=alpha; normalize+=1.0; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); normalize=1.0/(fabs((double) normalize) <= MagickEpsilon ? 
1.0 : normalize); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index)); } q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RadialBlurImageChannel) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta); sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. 
% % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % Image *SelectiveBlurImageChannel(const Image *image, % const ChannelType channel,const double radius,const double sigma, % const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType SelectiveContrast(const PixelPacket *p, const PixelPacket *q,const double threshold) { if (fabs(PixelIntensity(p)-PixelIntensity(q)) < threshold) return(MagickTrue); return(MagickFalse); } MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { Image *blur_image; blur_image=SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma, threshold,exception); return(blur_image); } MagickExport Image *SelectiveBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view; double *kernel; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t j, u, v, y; /* Initialize blur image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(double *) AcquireQuantumMemory((size_t) width,width*sizeof(*kernel)); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) width/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Threshold blur image. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireCacheView(image); blur_view=AcquireCacheView(blur_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickRealType gamma; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),image->columns+width,width,exception); q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket pixel; register const double *restrict k; register ssize_t u; ssize_t j, v; pixel=bias; k=kernel; gamma=0.0; j=0; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse) { pixel.red+=(*k)*GetPixelRed(p+u+j); pixel.green+=(*k)*GetPixelGreen(p+u+j); pixel.blue+=(*k)*GetPixelBlue(p+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse) { pixel.opacity+=(*k)*(p+u+j)->opacity; gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity)); } } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse) { pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma* pixel.index)); } } } else { MagickRealType alpha; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse) { alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j)); pixel.red+=(*k)*alpha*GetPixelRed(p+u+j); pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j); pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j); pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); gamma+=(*k)*alpha; } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse) { pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { if (SelectiveContrast(p+u+j,q,threshold) != MagickFalse) { alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(p+u+j)); pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma* pixel.index)); } } } p++; q++; } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImageChannel) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); kernel=(double *) RelinquishMagickMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray, const double azimuth,const double elevation,ExceptionInfo *exception) { #define ShadeImageTag "Shade/Image" CacheView *image_view, *shade_view; Image *shade_image; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo light; ssize_t y; /* Initialize shaded image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (shade_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse) { InheritException(exception,&shade_image->exception); shade_image=DestroyImage(shade_image); return((Image *) NULL); } /* Compute the light vector. */ light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.z=(double) QuantumRange*sin(DegreesToRadians(elevation)); /* Shade image. 
*/ status=MagickTrue; progress=0; image_view=AcquireCacheView(image); shade_view=AcquireCacheView(shade_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickRealType distance, normal_distance, shade; PrimaryInfo normal; register const PixelPacket *restrict p, *restrict s0, *restrict s1, *restrict s2; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-1,y-1,image->columns+2,3,exception); q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1, exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } /* Shade this row of pixels. */ normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */ s0=p+1; s1=s0+image->columns+2; s2=s1+image->columns+2; for (x=0; x < (ssize_t) image->columns; x++) { /* Determine the surface normal and compute shading. 
*/ normal.x=(double) (PixelIntensity(s0-1)+PixelIntensity(s1-1)+ PixelIntensity(s2-1)-PixelIntensity(s0+1)-PixelIntensity(s1+1)- PixelIntensity(s2+1)); normal.y=(double) (PixelIntensity(s2-1)+PixelIntensity(s2)+ PixelIntensity(s2+1)-PixelIntensity(s0-1)-PixelIntensity(s0)- PixelIntensity(s0+1)); if ((normal.x == 0.0) && (normal.y == 0.0)) shade=light.z; else { shade=0.0; distance=normal.x*light.x+normal.y*light.y+normal.z*light.z; if (distance > MagickEpsilon) { normal_distance= normal.x*normal.x+normal.y*normal.y+normal.z*normal.z; if (normal_distance > (MagickEpsilon*MagickEpsilon)) shade=distance/sqrt((double) normal_distance); } } if (gray != MagickFalse) { SetPixelRed(q,shade); SetPixelGreen(q,shade); SetPixelBlue(q,shade); } else { SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1))); SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1))); SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1))); } q->opacity=s1->opacity; s0++; s1++; s2++; q++; } if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ShadeImage) #endif proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } shade_view=DestroyCacheView(shade_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) shade_image=DestroyImage(shade_image); return(shade_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. 
Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%    Image *SharpenImageChannel(const Image *image,const ChannelType channel,
%      const double radius,const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: sharpen all default channels of the image. */
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *sharp_image;

  sharp_image=SharpenImageChannel(image,DefaultChannels,radius,sigma,exception);
  return(sharp_image);
}

/*
  Sharpen the selected channels by convolving with a negated 2D Gaussian
  whose center tap is adjusted to give a net positive peak (a
  Laplacian-of-Gaussian style sharpening kernel).  Returns a newly allocated
  image (caller owns it), or NULL with `exception` set on failure.
*/
MagickExport Image *SharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    *kernel,
    normalize;

  Image
    *sharp_image;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double *) AcquireQuantumMemory((size_t) width*width,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Fill the kernel with negated Gaussian weights; accumulate their sum so
    the center tap can be set to make the kernel sharpen rather than blur.
  */
  normalize=0.0;
  j=(ssize_t) width/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel[i]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel[i];
      i++;
    }
  }
  /*
    Center tap: -2 times the (negative) sum of all taps, so the kernel's
    total weight is positive.  After the loops i == width*width, so i/2
    indexes the center element.
  */
  kernel[i/2]=(double) ((-2.0)*normalize);
  sharp_image=ConvolveImageChannel(image,channel,width,kernel,exception);
  kernel=(double *) RelinquishMagickMemory(kernel);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a block defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius:  Choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  RandomInfo
    **restrict random_info;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&spread_image->exception);
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image: each destination pixel is interpolated from a randomly
    jittered source location within +/- width/2 of its own coordinates.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(spread_image,&bias);
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireCacheView(image);
  spread_view=AcquireCacheView(spread_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) spread_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's RandomInfo */

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(spread_view);
    pixel=bias;
    for (x=0; x < (ssize_t) spread_image->columns; x++)
    {
      (void) InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x+width*(GetPseudoRandomValue(
        random_info[id])-0.5),(double) y+width*(GetPseudoRandomValue(
        random_info[id])-0.5),&pixel,exception);
      SetPixelPacket(spread_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t a t i s t i c I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StatisticImage() makes each pixel the min / max / median / mode / etc. of
%  the neighborhood of the specified width and height.
%
%  The format of the StatisticImage method is:
%
%      Image *StatisticImage(const Image *image,const StatisticType type,
%        const size_t width,const size_t height,ExceptionInfo *exception)
%      Image *StatisticImageChannel(const Image *image,
%        const ChannelType channel,const StatisticType type,
%        const size_t width,const size_t height,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the image channel.
%
%    o type: the statistic type (median, mode, etc.).
%
%    o width: the width of the pixel neighborhood.
%
%    o height: the height of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Skip-list histograms for neighborhood statistics.  One SkipList per
  channel; node i records how many neighborhood pixels have 16-bit channel
  value i.  Node 65536 is the list root/sentinel.  The `signature` field
  marks which nodes belong to the current neighborhood: ResetPixelList()
  bumps the list signature instead of clearing all 65537 nodes.
*/

#define ListChannels  5

typedef struct _ListNode
{
  size_t
    next[9],     /* forward pointers, one per skip-list level (max 9) */
    count,       /* occurrences of this channel value in the neighborhood */
    signature;   /* node is live iff this matches PixelList.signature */
} ListNode;

typedef struct _SkipList
{
  ssize_t
    level;       /* current highest level in use */

  ListNode
    *nodes;      /* 65537 nodes: values 0..65535 plus root at 65536 */
} SkipList;

typedef struct _PixelList
{
  size_t
    length,      /* number of pixels in the neighborhood (width*height) */
    seed,        /* LCG state for randomized skip-list levels */
    signature;   /* current generation marker */

  SkipList
    lists[ListChannels];
} PixelList;

/* Release one PixelList and its per-channel node arrays; returns NULL. */
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  register ssize_t
    i;

  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (i=0; i < ListChannels; i++)
    if (pixel_list->lists[i].nodes != (ListNode *) NULL)
      pixel_list->lists[i].nodes=(ListNode *) RelinquishMagickMemory(
        pixel_list->lists[i].nodes);
  pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
  return(pixel_list);
}

/* Release the per-thread array of PixelLists; returns NULL. */
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    i;

  assert(pixel_list != (PixelList **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (pixel_list[i] != (PixelList *) NULL)
      pixel_list[i]=DestroyPixelList(pixel_list[i]);
  pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
  return(pixel_list);
}

/*
  Allocate one PixelList for a width x height neighborhood; returns NULL on
  allocation failure (partially built lists are cleaned up).
*/
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  PixelList
    *pixel_list;

  register ssize_t
    i;

  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return(pixel_list);
  (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (i=0; i < ListChannels; i++)
  {
    /* 65537 = 65536 possible 16-bit values + the root node. */
    pixel_list->lists[i].nodes=(ListNode *) AcquireQuantumMemory(65537UL,
      sizeof(*pixel_list->lists[i].nodes));
    if (pixel_list->lists[i].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) ResetMagickMemory(pixel_list->lists[i].nodes,0,65537UL*
      sizeof(*pixel_list->lists[i].nodes));
  }
  pixel_list->signature=MagickSignature;
  return(pixel_list);
}

/* Allocate one PixelList per OpenMP thread; returns NULL on failure. */
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  PixelList
    **pixel_list;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_list[i]=AcquirePixelList(width,height);
    if (pixel_list[i] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}

/*
  Insert channel value `color` (0..65535) as a new node into the channel's
  skip-list, keeping the list sorted by value.
*/
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
  const size_t color)
{
  register SkipList
    *list;

  register ssize_t
    level;

  size_t
    search,
    update[9];

  /*
    Initialize the node.
  */
  list=pixel_list->lists+channel;
  list->nodes[color].signature=pixel_list->signature;
  list->nodes[color].count=1;
  /*
    Determine where it belongs in the list.
  */
  search=65536UL;
  for (level=list->level; level >= 0; level--)
  {
    while (list->nodes[search].next[level] < color)
      search=list->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node.
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (list->level+2))
    level=list->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > list->level)
  {
    list->level++;
    update[list->level]=65536UL;
  }
  /*
    Link the node into the skip-list.
  */
  do
  {
    list->nodes[color].next[level]=list->nodes[update[level]].next[level];
    list->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}

/* Walk each channel's list and return the maximum value per channel. */
static MagickPixelPacket GetMaximumPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    maximum;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the maximum value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    maximum=list->nodes[color].next[0];
    do
    {
      color=list->nodes[color].next[0];
      if (color > maximum)
        maximum=color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) maximum;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}

/* Walk each channel's list and return the count-weighted mean per channel. */
static MagickPixelPacket GetMeanPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  MagickRealType
    sum;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the mean value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    channels[channel]=(unsigned short) sum;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}

/*
  Walk each channel's sorted list, accumulating counts, and stop at the
  value whose cumulative count passes half the neighborhood: the median.
*/
static MagickPixelPacket GetMedianPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the median value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    do
    {
      color=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) color;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}

/* Walk each channel's list and return the minimum value per channel. */
static MagickPixelPacket GetMinimumPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    minimum;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the minimum value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    count=0;
    color=65536UL;
    minimum=list->nodes[color].next[0];
    do
    {
      color=list->nodes[color].next[0];
      if (color < minimum)
        minimum=color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) minimum;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}

/* Walk each channel's list and return the most frequent value per channel. */
static MagickPixelPacket GetModePixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    max_count,
    mode;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    mode=color;
    max_count=list->nodes[mode].count;
    count=0;
    do
    {
      color=list->nodes[color].next[0];
      if (list->nodes[color].count > max_count)
        {
          mode=color;
          max_count=list->nodes[mode].count;
        }
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) mode;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}

/*
  Find the median, then move one step toward a neighboring value when the
  median sits at either end of the list (i.e. avoid returning a peak value).
*/
static MagickPixelPacket GetNonpeakPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color,
    next,
    previous;

  ssize_t
    count;

  unsigned short
    channels[5];

  /*
    Finds the non peak value for each of the colors.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    next=list->nodes[color].next[0];
    count=0;
    do
    {
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    /* 65536 is the root sentinel: shift off it when possible. */
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous;
    channels[channel]=(unsigned short) color;
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}

/* Per-channel standard deviation: sqrt(E[x^2] - E[x]^2) over the lists. */
static MagickPixelPacket GetStandardDeviationPixelList(PixelList *pixel_list)
{
  MagickPixelPacket
    pixel;

  MagickRealType
    sum,
    sum_squared;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard-deviation value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      register ssize_t
        i;

      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      for (i=0; i < (ssize_t) list->nodes[color].count; i++)
        sum_squared+=((MagickRealType) color)*((MagickRealType) color);
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  GetMagickPixelPacket((const Image *) NULL,&pixel);
  pixel.red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel.green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel.blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel.opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel.index=(MagickRealType) ScaleShortToQuantum(channels[4]);
  return(pixel);
}

/*
  Add one pixel's channel values to the per-channel skip-lists: bump the
  node count when the value is already present in this generation,
  otherwise insert a fresh node.
*/
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  index=ScaleQuantumToShort(GetPixelRed(pixel));
  signature=pixel_list->lists[0].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[0].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,0,index);
  index=ScaleQuantumToShort(GetPixelGreen(pixel));
  signature=pixel_list->lists[1].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[1].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,1,index);
  index=ScaleQuantumToShort(GetPixelBlue(pixel));
  signature=pixel_list->lists[2].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[2].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,2,index);
  index=ScaleQuantumToShort(GetPixelOpacity(pixel));
  signature=pixel_list->lists[3].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[3].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,3,index);
  /*
    NOTE(review): for non-CMYK images `index` keeps the opacity value here,
    so list 4 is fed opacity data; its result is only consumed when the
    colorspace is CMYK -- confirm this is intentional.
  */
  if (image->colorspace == CMYKColorspace)
    index=ScaleQuantumToShort(GetPixelIndex(indexes));
  signature=pixel_list->lists[4].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[4].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,4,index);
}

/* Absolute value for MagickRealType. */
static inline MagickRealType MagickAbsoluteValue(const MagickRealType x)
{
  if (x < 0)
    return(-x);
  return(x);
}

/*
  Prepare the lists for a new neighborhood: reset each root's forward
  pointers and bump the generation signature so stale nodes are ignored.
*/
static void ResetPixelList(PixelList *pixel_list)
{
  int
    level;

  register ListNode
    *root;

  register SkipList
    *list;

  register ssize_t
    channel;

  /*
    Reset the skip-list.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    root=list->nodes+65536UL;
    list->level=0;
    for (level=0; level < 9; level++)
      root->next[level]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}

/* Convenience wrapper: apply the statistic to all default channels. */
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  Image
    *statistic_image;

  statistic_image=StatisticImageChannel(image,DefaultChannels,type,width,
    height,exception);
  return(statistic_image);
}

/*
  Replace each pixel of the selected channels with the requested statistic
  (min/max/median/mode/...) of its width x height neighborhood.  Returns a
  new image (caller owns it), or NULL with `exception` set.
*/
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticWidth \
  (width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width)
#define StatisticHeight \
  (height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) : height)
#define StatisticImageTag  "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **restrict pixel_list;

  ssize_t
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  pixel_list=AcquirePixelListThreadSet(StatisticWidth,StatisticHeight);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  statistic_view=AcquireCacheView(statistic_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's PixelList */

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict statistic_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Virtual pixels cover the neighborhood halo around this row. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) StatisticWidth/2L),y-
      (ssize_t) (StatisticHeight/2L),image->columns+StatisticWidth,
      StatisticHeight,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const IndexPacket
        *restrict s;

      register const PixelPacket
        *restrict r;

      register ssize_t
        u,
        v;

      /* Build the neighborhood histogram (p advances with x below). */
      r=p;
      s=indexes+x;
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) StatisticHeight; v++)
      {
        for (u=0; u < (ssize_t) StatisticWidth; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        r+=image->columns+StatisticWidth;
        s+=image->columns+StatisticWidth;
      }
      GetMagickPixelPacket(image,&pixel);
      SetMagickPixelPacket(image,p+StatisticWidth*StatisticHeight/2,indexes+
        StatisticWidth*StatisticHeight/2+x,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          MagickPixelPacket
            maximum,
            minimum;

          minimum=GetMinimumPixelList(pixel_list[id]);
          maximum=GetMaximumPixelList(pixel_list[id]);
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          pixel=GetMaximumPixelList(pixel_list[id]);
          break;
        }
        case MeanStatistic:
        {
          pixel=GetMeanPixelList(pixel_list[id]);
          break;
        }
        case MedianStatistic:
        default:
        {
          pixel=GetMedianPixelList(pixel_list[id]);
          break;
        }
        case MinimumStatistic:
        {
          pixel=GetMinimumPixelList(pixel_list[id]);
          break;
        }
        case ModeStatistic:
        {
          pixel=GetModePixelList(pixel_list[id]);
          break;
        }
        case NonpeakStatistic:
        {
          pixel=GetNonpeakPixelList(pixel_list[id]);
          break;
        }
        case StandardDeviationStatistic:
        {
          pixel=GetStandardDeviationPixelList(pixel_list[id]);
          break;
        }
      }
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_StatisticImage)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  return(statistic_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%    Image *UnsharpMaskImage(const Image *image,const double radius,
%      const double sigma,const double amount,const double threshold,
%      ExceptionInfo *exception)
%    Image *UnsharpMaskImageChannel(const Image *image,
%      const ChannelType channel,const double radius,const double sigma,
%      const double amount,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o amount: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference
%      amount.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: unsharp-mask all default channels of the image. */
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double amount,const double threshold,
  ExceptionInfo *exception)
{
  Image
    *sharp_image;

  sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,
    amount,threshold,exception);
  return(sharp_image);
}

/*
  Sharpen the selected channels: blur a copy of the image, then wherever the
  original-minus-blur difference exceeds the threshold, add `amount` of that
  difference back into the original.  Returns a new image (caller owns it),
  or NULL with `exception` set on failure.

  Review fixes vs. the previous revision: raw field accesses (q->green,
  q->blue, q->opacity) replaced with the GetPixel* accessor macros used
  everywhere else in this file; missing (MagickRealType) cast added in the
  opacity branch; exception signature assert added to match the sibling
  functions.  No behavioral change for callers.
*/
MagickExport Image *UnsharpMaskImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double amount,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  unsharp_image=BlurImageChannel(image,channel,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(MagickRealType) QuantumRange*threshold;
  /*
    Unsharp-mask image: q walks the blurred copy and is overwritten with the
    sharpened result in place.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireCacheView(image);
  unsharp_view=AcquireCacheView(unsharp_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict unsharp_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
    pixel=bias;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
          if (fabs(2.0*pixel.red) < quantum_threshold)
            pixel.red=(MagickRealType) GetPixelRed(p);
          else
            pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*amount);
          SetPixelRed(q,ClampToQuantum(pixel.red));
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel.green=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
          if (fabs(2.0*pixel.green) < quantum_threshold)
            pixel.green=(MagickRealType) GetPixelGreen(p);
          else
            pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*amount);
          SetPixelGreen(q,ClampToQuantum(pixel.green));
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel.blue=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
          if (fabs(2.0*pixel.blue) < quantum_threshold)
            pixel.blue=(MagickRealType) GetPixelBlue(p);
          else
            pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*amount);
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
        }
      if ((channel & OpacityChannel) != 0)
        {
          pixel.opacity=GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q);
          if (fabs(2.0*pixel.opacity) < quantum_threshold)
            pixel.opacity=(MagickRealType) GetPixelOpacity(p);
          else
            pixel.opacity=(MagickRealType) GetPixelOpacity(p)+
              (pixel.opacity*amount);
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel.index=GetPixelIndex(indexes+x)-(MagickRealType)
            GetPixelIndex(unsharp_indexes+x);
          if (fabs(2.0*pixel.index) < quantum_threshold)
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x);
          else
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+
              (pixel.index*amount);
          SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_UnsharpMaskImageChannel)
#endif
        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
7791.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute dist_schedule(static, 8) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp parallel for simd schedule(dynamic, 16) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
/* ==== file: timer.c ==== */
/* * Copyright (c) 2011-2012, Los Alamos National Security, LLC. * All rights Reserved. * * Copyright 2011-2012. Los Alamos National Security, LLC. This software was produced * under U.S. Government contract DE-AC52-06NA25396 for Los Alamos National * Laboratory (LANL), which is operated by Los Alamos National Security, LLC * for the U.S. Department of Energy. The U.S. Government has rights to use, * reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS * ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR * ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified * to produce derivative works, such modified software should be clearly marked, * so as not to confuse it with the version available from LANL. * * Additionally, redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the Los Alamos National Security, LLC, Los Alamos * National Laboratory, LANL, the U.S. Government, nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE LOS ALAMOS NATIONAL SECURITY, LLC AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL LOS ALAMOS NATIONAL * SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * CLAMR -- LA-CC-11-094 * This research code is being developed as part of the * 2011 X Division Summer Workshop for the express purpose * of a collaborative code for development of ideas in * the implementation of AMR codes for Exascale platforms * * AMR implementation of the Wave code previously developed * as a demonstration code for regular grids on Exascale platforms * as part of the Supercomputing Challenge and Los Alamos * National Laboratory * * Authors: Bob Robey XCP-2 brobey@lanl.gov * Neal Davis davis68@lanl.gov, davis68@illinois.edu * David Nicholaeff dnic@lanl.gov, mtrxknight@aol.com * Dennis Trujillo dptrujillo@lanl.gov, dptru10@gmail.com * */ #include <sys/time.h> #include <stdlib.h> #include <stdio.h> #include <sys/types.h> #include <unistd.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "timer.h" void cpu_timer_start(struct timeval *tstart_cpu){ #ifdef _OPENMP if ( omp_in_parallel() ) { #pragma omp master { gettimeofday(tstart_cpu, NULL); } } else { gettimeofday(tstart_cpu, NULL); } #else gettimeofday(tstart_cpu, NULL); #endif } double cpu_timer_stop(struct timeval tstart_cpu){ double result; struct timeval tstop_cpu, tresult; #ifdef _OPENMP if ( omp_in_parallel() ) { #pragma omp master { gettimeofday(&tstop_cpu, NULL); tresult.tv_sec = tstop_cpu.tv_sec - tstart_cpu.tv_sec; tresult.tv_usec = tstop_cpu.tv_usec - tstart_cpu.tv_usec; result = 
(double)tresult.tv_sec + (double)tresult.tv_usec*1.0e-6; } } else { gettimeofday(&tstop_cpu, NULL); tresult.tv_sec = tstop_cpu.tv_sec - tstart_cpu.tv_sec; tresult.tv_usec = tstop_cpu.tv_usec - tstart_cpu.tv_usec; result = (double)tresult.tv_sec + (double)tresult.tv_usec*1.0e-6; } #else gettimeofday(&tstop_cpu, NULL); tresult.tv_sec = tstop_cpu.tv_sec - tstart_cpu.tv_sec; tresult.tv_usec = tstop_cpu.tv_usec - tstart_cpu.tv_usec; result = (double)tresult.tv_sec + (double)tresult.tv_usec*1.0e-6; #endif return(result); }
/* ==== file: pi_loop.c ==== */
/*
This program will numerically compute the integral of

                  4/(1+x*x)

from 0 to 1.  The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.

The program was parallelized using OpenMP by adding just
four lines

(1) A line to include omp.h -- the include file that
contains OpenMP's function prototypes and constants.

(2) A pragma that tells OpenMP to create a team of threads

(3) A pragma to cause one of the threads to print the
number of threads being used by the program.

(4) A pragma to split up loop iterations among the team
of threads.  This pragma includes 2 clauses to (1) create a
private variable and (2) to cause the threads to compute their
sums locally and then combine their local sums into a
single global value.

History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>

static long num_steps = 100000000;
double step;

int main ()
{
    int i;
    double pi, sum = 0.0;
    double start_time, run_time;

    step = 1.0/(double) num_steps;

    /* Outer loop sweeps the thread count from 1 to 4.  The OpenMP
       worksharing loop below privatizes i within the construct, so the
       outer counter is not clobbered by the inner loop. */
    for (i = 1; i <= 4; i++) {
        sum = 0.0;
        omp_set_num_threads(i);
        start_time = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single // single has an implicit barrier at its end
            printf(" num_threads = %d",omp_get_num_threads());

            /* reduction(+:sum): each thread accumulates a private copy of
               sum; the copies are combined when the loop ends.

               BUG FIX: x used to be declared at function scope, which made
               it SHARED across the team -- every thread raced on the same
               x between computing it and reading it, silently corrupting
               the result.  Declaring x inside the loop body makes it
               per-iteration local, so each thread works on its own value. */
#pragma omp for reduction(+:sum)
            for (i = 1; i <= num_steps; i++) {
                double x = (i - 0.5) * step;
                sum = sum + 4.0 / (1.0 + x * x);
            }
        }
        pi = step * sum;
        run_time = omp_get_wtime() - start_time;
        printf("\n pi is %f in %f seconds and %d threads\n",pi,run_time,i);
    }
}
/* ==== file: convolution_sgemm_pack8to4_int8.h ==== */
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// int8 GEMM over an im2col'd input for the pack8->pack4 convolution path.
// bottom_im2col: w=size (outw*outh), h=maxk, c=inch, 8 int8 values packed per
// element (hence the int64_t element moves). top_blob receives int32 results.
// Runtime-dispatches to stronger ISA variants first, then runs the generic
// SSE2/AVX2 implementation below.
static void im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        extern void im2col_sgemm_pack8to4_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
        im2col_sgemm_pack8to4_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        extern void im2col_sgemm_pack8to4_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
        im2col_sgemm_pack8to4_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

#if NCNN_AVX2 && __AVX__ && !__AVX2__
    if (ncnn::cpu_support_x86_avx2())
    {
        extern void im2col_sgemm_pack8to4_int8_sse_avx2(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
        im2col_sgemm_pack8to4_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

#if NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        extern void im2col_sgemm_pack8to4_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
        im2col_sgemm_pack8to4_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
        return;
    }
#endif

    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    // permute: regroup columns of the im2col matrix into tiles of 4 / 2 / 1
    // output pixels so the GEMM loops below read them contiguously.
    Mat tmp;
#if __AVX2__
    if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else
    if (size >= 2)
        tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif
    {
#if __AVX2__
        // tiles of 4 pixels (each pixel is one int64_t = 8 packed int8)
        int remain_size_start = 0;
        int nn_size = size >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            int64_t* tmpptr = tmp.channel(i / 4);

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    __m256i _v = _mm256_loadu_si256((const __m256i*)img0);
                    _mm256_storeu_si256((__m256i*)tmpptr, _v);
                    tmpptr += 4;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;
#else
        int remain_size_start = 0;
        int nn_size = size >> 1;
#endif

        // tiles of 2 pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

#if __AVX2__
            int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
            int64_t* tmpptr = tmp.channel(i / 2);
#endif

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    __m128i _v = _mm_loadu_si128((const __m128i*)img0);
                    _mm_storeu_si128((__m128i*)tmpptr, _v);
                    tmpptr += 2;
                    img0 += size;
                }
            }
        }

        remain_size_start += nn_size << 1;

        // leftover single pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
#if __AVX2__
            int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
            int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
#endif

            for (int q = 0; q < inch; q++)
            {
                const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    tmpptr += 1;
                    img0 += size;
                }
            }
        }
    }

    // GEMM: for each group of 4 output channels, accumulate int8*int8
    // products into int32 sums over nn = inch*maxk packed octets.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr0 = top_blob.channel(p);

        int i = 0;
#if __AVX2__
        // 4 output pixels per iteration
        for (; i + 3 < size; i += 4)
        {
            const signed char* tmpptr = tmp.channel(i / 4);
            const signed char* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            // interleaved accumulators; the 4x8 transposes below untangle
            // the pixel/channel lanes before the final horizontal adds
            __m256i _sum00_11 = _mm256_setzero_si256();
            __m256i _sum10_01 = _mm256_setzero_si256();
            __m256i _sum02_13 = _mm256_setzero_si256();
            __m256i _sum12_03 = _mm256_setzero_si256();
            __m256i _sum04_15 = _mm256_setzero_si256();
            __m256i _sum14_05 = _mm256_setzero_si256();
            __m256i _sum06_17 = _mm256_setzero_si256();
            __m256i _sum16_07 = _mm256_setzero_si256();

            int j = 0;
            for (; j < nn; j++)
            {
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);

                __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
                _sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
                _sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
                _sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
                // emulate a widening dot product: lo/hi 16-bit products
                // recombined into 32-bit lanes
                __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
                __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
                __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
                __m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16);
                __m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16);
                __m256i _sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16);
                __m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16);

                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03));
                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03));
#endif

                __m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
                __m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);

                __m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum04_15 = _mm256_dpwssd_epi32(_sum04_15, _val23_16, _w01_16);
                _sum14_05 = _mm256_dpwssd_epi32(_sum14_05, _val32_16, _w01_16);
                _sum06_17 = _mm256_dpwssd_epi32(_sum06_17, _val23_16, _w23_16);
                _sum16_07 = _mm256_dpwssd_epi32(_sum16_07, _val32_16, _w23_16);
#else
                __m256i _sl04_15 = _mm256_mullo_epi16(_val23_16, _w01_16);
                __m256i _sh04_15 = _mm256_mulhi_epi16(_val23_16, _w01_16);
                __m256i _sl14_05 = _mm256_mullo_epi16(_val32_16, _w01_16);
                __m256i _sh14_05 = _mm256_mulhi_epi16(_val32_16, _w01_16);
                __m256i _sl06_17 = _mm256_mullo_epi16(_val23_16, _w23_16);
                __m256i _sh06_17 = _mm256_mulhi_epi16(_val23_16, _w23_16);
                __m256i _sl16_07 = _mm256_mullo_epi16(_val32_16, _w23_16);
                __m256i _sh16_07 = _mm256_mulhi_epi16(_val32_16, _w23_16);

                _sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpacklo_epi16(_sl04_15, _sh04_15));
                _sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpacklo_epi16(_sl14_05, _sh14_05));
                _sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpacklo_epi16(_sl06_17, _sh06_17));
                _sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpacklo_epi16(_sl16_07, _sh16_07));
                _sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpackhi_epi16(_sl04_15, _sh04_15));
                _sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpackhi_epi16(_sl14_05, _sh14_05));
                _sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpackhi_epi16(_sl06_17, _sh06_17));
                _sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpackhi_epi16(_sl16_07, _sh16_07));
#endif

                tmpptr += 32;
                kptr0 += 32;
            }

            // transpose 4x8
            {
                __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
                _tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
                _tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
                _tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
                _sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                _sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                _sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                _sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
            }
            {
                __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm256_unpacklo_epi32(_sum04_15, _sum14_05);
                _tmp1 = _mm256_unpacklo_epi32(_sum06_17, _sum16_07);
                _tmp2 = _mm256_unpackhi_epi32(_sum04_15, _sum14_05);
                _tmp3 = _mm256_unpackhi_epi32(_sum06_17, _sum16_07);
                _sum04_15 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                _sum14_05 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                _sum06_17 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                _sum16_07 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
            _sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);

            _sum04_15 = _mm256_add_epi32(_sum04_15, _sum14_05);
            _sum06_17 = _mm256_add_epi32(_sum06_17, _sum16_07);
            _sum04_15 = _mm256_add_epi32(_sum04_15, _sum06_17);

            // reorder lanes back to pixel-major before storing
            __m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
            _sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
            _sum04_15 = _mm256_permutevar8x32_epi32(_sum04_15, _perm_mask);

            _mm256_storeu_si256((__m256i*)outptr0, _sum00_11);
            _mm256_storeu_si256((__m256i*)(outptr0 + 8), _sum04_15);
            outptr0 += 16;
        }
#endif
        // 2 output pixels per iteration
        for (; i + 1 < size; i += 2)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2);
#endif
            const signed char* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

#if __AVX2__
            __m256i _sum00_11 = _mm256_setzero_si256();
            __m256i _sum10_01 = _mm256_setzero_si256();
            __m256i _sum02_13 = _mm256_setzero_si256();
            __m256i _sum12_03 = _mm256_setzero_si256();
#else
            __m128i _sum00 = _mm_setzero_si128();
            __m128i _sum01 = _mm_setzero_si128();
            __m128i _sum02 = _mm_setzero_si128();
            __m128i _sum03 = _mm_setzero_si128();
            __m128i _sum10 = _mm_setzero_si128();
            __m128i _sum11 = _mm_setzero_si128();
            __m128i _sum12 = _mm_setzero_si128();
            __m128i _sum13 = _mm_setzero_si128();
#endif

            int j = 0;
            for (; j < nn; j++)
            {
#if __AVX2__
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);

                __m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
                _sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
                _sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
                _sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
                __m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
                __m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                __m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
                __m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
                __m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16);
                __m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16);
                __m256i _sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16);
                __m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16);

                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03));
                _sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
                _sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
                _sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13));
                _sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03));
#endif
#else
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                // sign-extend int8 -> int16 via compare-unpack (SSE2)
                __m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
                __m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
                __m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

#if __XOP__
                _sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
                _sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
                _sum02 = _mm_maddd_epi16(_val0, _w2, _sum02);
                _sum03 = _mm_maddd_epi16(_val0, _w3, _sum03);
                _sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
                _sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
                _sum12 = _mm_maddd_epi16(_val1, _w2, _sum12);
                _sum13 = _mm_maddd_epi16(_val1, _w3, _sum13);
#else
                __m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
                __m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
                __m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
                __m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
                __m128i _sl02 = _mm_mullo_epi16(_val0, _w2);
                __m128i _sh02 = _mm_mulhi_epi16(_val0, _w2);
                __m128i _sl03 = _mm_mullo_epi16(_val0, _w3);
                __m128i _sh03 = _mm_mulhi_epi16(_val0, _w3);
                __m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
                __m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
                __m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
                __m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
                __m128i _sl12 = _mm_mullo_epi16(_val1, _w2);
                __m128i _sh12 = _mm_mulhi_epi16(_val1, _w2);
                __m128i _sl13 = _mm_mullo_epi16(_val1, _w3);
                __m128i _sh13 = _mm_mulhi_epi16(_val1, _w3);

                _sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpacklo_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpacklo_epi16(_sl03, _sh03));
                _sum00 = _mm_add_epi32(_sum00, _mm_unpackhi_epi16(_sl00, _sh00));
                _sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl01, _sh01));
                _sum02 = _mm_add_epi32(_sum02, _mm_unpackhi_epi16(_sl02, _sh02));
                _sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl03, _sh03));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpacklo_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpacklo_epi16(_sl13, _sh13));
                _sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl10, _sh10));
                _sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl11, _sh11));
                _sum12 = _mm_add_epi32(_sum12, _mm_unpackhi_epi16(_sl12, _sh12));
                _sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl13, _sh13));
#endif
#endif
                tmpptr += 16;
                kptr0 += 32;
            }

#if __AVX2__
            // transpose 4x8
            {
                __m256i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
                _tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
                _tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
                _tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
                _sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
                _sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
                _sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
                _sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
            _sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
            _sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);

            __m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
            _sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);

            _mm256_storeu_si256((__m256i*)outptr0, _sum00_11);
#else
            // transpose 4x4
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
                _tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
                _tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
                _tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
                _sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
                _tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
                _tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
                _tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
                _sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum00 = _mm_add_epi32(_sum00, _sum01);
            _sum02 = _mm_add_epi32(_sum02, _sum03);
            _sum10 = _mm_add_epi32(_sum10, _sum11);
            _sum12 = _mm_add_epi32(_sum12, _sum13);

            _sum00 = _mm_add_epi32(_sum00, _sum02);
            _sum10 = _mm_add_epi32(_sum10, _sum12);

            _mm_storeu_si128((__m128i*)outptr0, _sum00);
            _mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
#endif
            outptr0 += 8;
        }
        // 1 output pixel per iteration
        for (; i < size; i++)
        {
#if __AVX2__
            const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
            const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
            const signed char* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

#if __AVX2__
            __m256i _sum0_1 = _mm256_setzero_si256();
            __m256i _sum2_3 = _mm256_setzero_si256();
#else
            __m128i _sum0 = _mm_setzero_si128();
            __m128i _sum1 = _mm_setzero_si128();
            __m128i _sum2 = _mm_setzero_si128();
            __m128i _sum3 = _mm_setzero_si128();
#endif

            int j = 0;
            for (; j < nn; j++)
            {
#if __AVX2__
                __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
                _val = _mm_cvtepi8_epi16(_val);

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                __m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);

                // broadcast the pixel into both 128-bit lanes
                __m256i _valval = _mm256_inserti128_si256(_mm256_castsi128_si256(_val), _val, 1);

#if __AVXVNNI__ || __AVX512VNNI__
                _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _valval, _w01_16);
                _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _valval, _w23_16);
#else
                __m256i _sl0_1 = _mm256_mullo_epi16(_valval, _w01_16);
                __m256i _sh0_1 = _mm256_mulhi_epi16(_valval, _w01_16);
                __m256i _sl2_3 = _mm256_mullo_epi16(_valval, _w23_16);
                __m256i _sh2_3 = _mm256_mulhi_epi16(_valval, _w23_16);

                _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl0_1, _sh0_1));
                _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl2_3, _sh2_3));
                _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl0_1, _sh0_1));
                _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl2_3, _sh2_3));
#endif
#else
                __m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
                _val = _mm_cvtepi8_epi16(_val);
#else
                _val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
#endif

                __m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
                __m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
                __m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
                __m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
                __m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
                __m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
                __m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
                __m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);

#if __XOP__
                _sum0 = _mm_maddd_epi16(_val, _w0, _sum0);
                _sum1 = _mm_maddd_epi16(_val, _w1, _sum1);
                _sum2 = _mm_maddd_epi16(_val, _w2, _sum2);
                _sum3 = _mm_maddd_epi16(_val, _w3, _sum3);
#else
                __m128i _sl0 = _mm_mullo_epi16(_val, _w0);
                __m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
                __m128i _sl1 = _mm_mullo_epi16(_val, _w1);
                __m128i _sh1 = _mm_mulhi_epi16(_val, _w1);
                __m128i _sl2 = _mm_mullo_epi16(_val, _w2);
                __m128i _sh2 = _mm_mulhi_epi16(_val, _w2);
                __m128i _sl3 = _mm_mullo_epi16(_val, _w3);
                __m128i _sh3 = _mm_mulhi_epi16(_val, _w3);

                _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpacklo_epi16(_sl3, _sh3));
                _sum0 = _mm_add_epi32(_sum0, _mm_unpackhi_epi16(_sl0, _sh0));
                _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
                _sum2 = _mm_add_epi32(_sum2, _mm_unpackhi_epi16(_sl2, _sh2));
                _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3));
#endif
#endif
                tmpptr += 8;
                kptr0 += 32;
            }

#if __AVX2__
            __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
            __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
            __m128i _sum2 = _mm256_extracti128_si256(_sum2_3, 0);
            __m128i _sum3 = _mm256_extracti128_si256(_sum2_3, 1);
#endif
            // transpose 4x4
            {
                __m128i _tmp0, _tmp1, _tmp2, _tmp3;
                _tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
                _tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
                _tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
                _tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
                _sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
                _sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
                _sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
                _sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
            }

            _sum0 = _mm_add_epi32(_sum0, _sum1);
            _sum2 = _mm_add_epi32(_sum2, _sum3);
            _sum0 = _mm_add_epi32(_sum0, _sum2);

            _mm_storeu_si128((__m128i*)outptr0, _sum0);
            outptr0 += 4;
        }
    }
}

// Repack the convolution weights for the GEMM above.
// src layout = maxk-inch-outch; dst layout = 8a-4b-maxk-inch/8a-outch/4b.
// Assumes inch is a multiple of 8 and outch a multiple of 4 (the pack8to4
// path) — TODO confirm with callers.
static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(32 * maxk, inch / 8, outch / 4, (size_t)1u);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// im2col the (8-packed int8) input with the given stride/dilation, then run
// the packed GEMM to produce top_blob.
static void convolution_im2col_sgemm_pack8to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col — each int64_t move copies one 8-packed int8 pixel
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            int64_t* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
/* ==== file: par_vector.c ==== */
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "_hypre_parcsr_mv.h"

#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int hypre_FillResponseParToVectorAll(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*);
#endif

/*--------------------------------------------------------------------------
 * hypre_ParVectorCreate
 *--------------------------------------------------------------------------*/

/* If create is called for HYPRE_NO_GLOBAL_PARTITION and partitioning is NOT
   null, then it is assumed that it is array of length 2 containing the start
   row of the calling processor followed by the start row of the next
   processor - AHB 6/05 */

/* Allocate and set up a hypre_ParVector of length global_size on comm.
 * If partitioning is NULL, one is generated here.  Ownership defaults:
 * OwnsData = 1 and OwnsPartitioning = 1, so hypre_ParVectorDestroy will
 * free both the local vector and the partitioning array.  Returns NULL
 * (and records an argument error) when global_size < 0.
 * Note: the local vector is created but its data is allocated later by
 * hypre_ParVectorInitialize. */
hypre_ParVector *
hypre_ParVectorCreate( MPI_Comm comm,
                       HYPRE_BigInt global_size,
                       HYPRE_BigInt *partitioning )
{
   hypre_ParVector *vector;
   HYPRE_Int num_procs, my_id;

   if (global_size < 0)
   {
      hypre_error_in_arg(2);
      return NULL;
   }
   vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
   hypre_MPI_Comm_rank(comm,&my_id);

   if (!partitioning)
   {
      hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
      /* local two-entry partitioning: [my first row, next proc's first row] */
      hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning);
#else
      /* global (num_procs+1)-entry partitioning */
      hypre_GeneratePartitioning(global_size, num_procs, &partitioning);
#endif
   }

   hypre_ParVectorAssumedPartition(vector) = NULL;

   hypre_ParVectorComm(vector) = comm;
   hypre_ParVectorGlobalSize(vector) = global_size;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(vector) = partitioning[0];
   hypre_ParVectorLastIndex(vector) = partitioning[1]-1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) =
      hypre_SeqVectorCreate(partitioning[1] - partitioning[0]);
#else
   hypre_ParVectorFirstIndex(vector) = partitioning[my_id];
   hypre_ParVectorLastIndex(vector) = partitioning[my_id+1] - 1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) =
      hypre_SeqVectorCreate(partitioning[my_id+1] - partitioning[my_id]);
#endif

   /* set defaults */
   hypre_ParVectorOwnsData(vector) = 1;
   hypre_ParVectorOwnsPartitioning(vector) = 1;
   hypre_ParVectorActualLocalSize(vector) = 0;

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_ParMultiVectorCreate
 *--------------------------------------------------------------------------*/

/* Same as hypre_ParVectorCreate, then records num_vectors on the result.
 * global_size is the global length of a SINGLE component vector. */
hypre_ParVector *
hypre_ParMultiVectorCreate( MPI_Comm comm,
                            HYPRE_BigInt global_size,
                            HYPRE_BigInt *partitioning,
                            HYPRE_Int num_vectors )
{
   /* note that global_size is the global length of a single vector */
   hypre_ParVector *vector =
      hypre_ParVectorCreate( comm, global_size, partitioning );

   hypre_ParVectorNumVectors(vector) = num_vectors;

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorDestroy
 *--------------------------------------------------------------------------*/

/* Free the vector.  The local vector and the partitioning array are freed
 * only when the corresponding Owns* flag is set; the assumed partition
 * (when present) is always destroyed.  NULL input is a no-op. */
HYPRE_Int
hypre_ParVectorDestroy( hypre_ParVector *vector )
{
   if (vector)
   {
      if ( hypre_ParVectorOwnsData(vector) )
      {
         hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(vector));
      }

      if ( hypre_ParVectorOwnsPartitioning(vector) )
      {
         hypre_TFree(hypre_ParVectorPartitioning(vector), HYPRE_MEMORY_HOST);
      }

      if (hypre_ParVectorAssumedPartition(vector))
      {
         hypre_AssumedPartitionDestroy(hypre_ParVectorAssumedPartition(vector));
      }

      hypre_TFree(vector, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInitialize
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorInitialize_v2( hypre_ParVector *vector, HYPRE_MemoryLocation memory_location ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_SeqVectorInitialize_v2(hypre_ParVectorLocalVector(vector), memory_location); hypre_ParVectorActualLocalSize(vector) = hypre_VectorSize(hypre_ParVectorLocalVector(vector)); return hypre_error_flag; } HYPRE_Int hypre_ParVectorInitialize( hypre_ParVector *vector ) { return hypre_ParVectorInitialize_v2(vector, hypre_ParVectorMemoryLocation(vector)); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetDataOwner( hypre_ParVector *vector, HYPRE_Int owns_data ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsData(vector) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetPartitioningOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetPartitioningOwner( hypre_ParVector *vector, HYPRE_Int owns_partitioning ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsPartitioning(vector) = owns_partitioning; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetNumVectors * call before calling hypre_ParVectorInitialize * probably this will do more harm than good, use hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ #if 0 HYPRE_Int hypre_ParVectorSetNumVectors( hypre_ParVector *vector, HYPRE_Int num_vectors ) { HYPRE_Int ierr=0; hypre_Vector *local_vector = hypre_ParVectorLocalVector(v); hypre_SeqVectorSetNumVectors( local_vector, 
num_vectors ); return ierr; } #endif /*-------------------------------------------------------------------------- * hypre_ParVectorRead *--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorRead( MPI_Comm comm, const char *file_name ) { char new_file_name[80]; hypre_ParVector *par_vector; HYPRE_Int my_id, num_procs; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; HYPRE_Int i; FILE *fp; hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "r"); hypre_fscanf(fp, "%b\n", &global_size); #ifdef HYPRE_NO_GLOBAL_PARTITION for (i=0; i < 2; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); #else for (i=0; i < num_procs; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); partitioning[num_procs] = global_size; #endif par_vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_ParVectorComm(par_vector) = comm; hypre_ParVectorGlobalSize(par_vector) = global_size; #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_ParVectorFirstIndex(par_vector) = partitioning[0]; hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1; #else hypre_ParVectorFirstIndex(par_vector) = partitioning[my_id]; hypre_ParVectorLastIndex(par_vector) = partitioning[my_id+1]-1; #endif hypre_ParVectorPartitioning(par_vector) = partitioning; hypre_ParVectorOwnsData(par_vector) = 1; hypre_ParVectorOwnsPartitioning(par_vector) = 1; hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 ); return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrint 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorPrint( hypre_ParVector *vector, const char *file_name ) { char new_file_name[80]; hypre_Vector *local_vector; MPI_Comm comm; HYPRE_Int my_id, num_procs, i; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; FILE *fp; if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } local_vector = hypre_ParVectorLocalVector(vector); comm = hypre_ParVectorComm(vector); partitioning = hypre_ParVectorPartitioning(vector); global_size = hypre_ParVectorGlobalSize(vector); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_SeqVectorPrint(local_vector,new_file_name); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "w"); hypre_fprintf(fp, "%b\n", global_size); #ifdef HYPRE_NO_GLOBAL_PARTITION for (i=0; i < 2; i++) hypre_fprintf(fp, "%b\n", partitioning[i]); #else for (i=0; i < num_procs; i++) hypre_fprintf(fp, "%b\n", partitioning[i]); #endif fclose (fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetConstantValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetConstantValues( hypre_ParVector *v, HYPRE_Complex value ) { hypre_Vector *v_local = hypre_ParVectorLocalVector(v); return hypre_SeqVectorSetConstantValues(v_local,value); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetRandomValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetRandomValues( hypre_ParVector *v, HYPRE_Int seed ) { HYPRE_Int my_id; hypre_Vector *v_local = hypre_ParVectorLocalVector(v); MPI_Comm comm = hypre_ParVectorComm(v); hypre_MPI_Comm_rank(comm,&my_id); seed *= (my_id+1); return hypre_SeqVectorSetRandomValues(v_local, seed); } 
/*--------------------------------------------------------------------------
 * hypre_ParVectorCopy
 *--------------------------------------------------------------------------*/

/* y := x (local copy; assumes conforming partitionings). */
HYPRE_Int
hypre_ParVectorCopy( hypre_ParVector *x,
                     hypre_ParVector *y )
{
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   return hypre_SeqVectorCopy(x_local, y_local);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorCloneShallow
 * returns a complete copy of a hypre_ParVector x - a shallow copy, re-using
 * the partitioning and data arrays of x
 *--------------------------------------------------------------------------*/

/* Shallow clone: y shares x's partitioning (not owned) and x's data array;
 * y owns its local hypre_Vector wrapper but not the data inside it. */
hypre_ParVector *
hypre_ParVectorCloneShallow( hypre_ParVector *x )
{
   hypre_ParVector * y =
      hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x),
                            hypre_ParVectorPartitioning(x));

   hypre_ParVectorOwnsData(y) = 1;
   /* ...This vector owns its local vector, although the local vector doesn't
    * own _its_ data */
   hypre_ParVectorOwnsPartitioning(y) = 0;
   hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
   hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(hypre_ParVectorLocalVector(x) );
   hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x);

   return y;
}

/* Deep clone into the given memory location: y gets its own copy of the data
 * but still shares (does not own) x's partitioning array. */
hypre_ParVector *
hypre_ParVectorCloneDeep_v2( hypre_ParVector *x,
                             HYPRE_MemoryLocation memory_location )
{
   hypre_ParVector *y =
      hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x),
                            hypre_ParVectorPartitioning(x));

   hypre_ParVectorOwnsData(y) = 1;
   hypre_ParVectorOwnsPartitioning(y) = 0;
   hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
   hypre_ParVectorLocalVector(y) =
      hypre_SeqVectorCloneDeep_v2( hypre_ParVectorLocalVector(x), memory_location );
   hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); //RL: WHY HERE?

   return y;
}

/* Move the local data of x to memory_location (deep copy + swap) when the
 * actual locations differ; otherwise just relabel the memory location. */
HYPRE_Int
hypre_ParVectorMigrate(hypre_ParVector *x, HYPRE_MemoryLocation memory_location)
{
   if (!x)
   {
      return hypre_error_flag;
   }

   if ( hypre_GetActualMemLocation(memory_location) !=
        hypre_GetActualMemLocation(hypre_ParVectorMemoryLocation(x)) )
   {
      hypre_Vector *x_local = hypre_SeqVectorCloneDeep_v2(hypre_ParVectorLocalVector(x), memory_location);
      hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(x));
      hypre_ParVectorLocalVector(x) = x_local;
   }
   else
   {
      /* same physical memory; only the label changes */
      hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(x)) = memory_location;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorScale
 *--------------------------------------------------------------------------*/

/* y := alpha * y (local operation). */
HYPRE_Int
hypre_ParVectorScale( HYPRE_Complex alpha,
                      hypre_ParVector *y )
{
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);

   return hypre_SeqVectorScale( alpha, y_local);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorAxpy
 *--------------------------------------------------------------------------*/

/* y := alpha * x + y (local operation). */
HYPRE_Int
hypre_ParVectorAxpy( HYPRE_Complex alpha,
                     hypre_ParVector *x,
                     hypre_ParVector *y )
{
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);

   return hypre_SeqVectorAxpy( alpha, x_local, y_local);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorMassAxpy
 *--------------------------------------------------------------------------*/

/* y := sum_i alpha[i] * x[i] + y over k vectors, with the given unroll
 * factor passed through to the sequential kernel. */
HYPRE_Int
hypre_ParVectorMassAxpy( HYPRE_Complex *alpha,
                         hypre_ParVector **x,
                         hypre_ParVector *y,
                         HYPRE_Int k,
                         HYPRE_Int unroll )
{
   HYPRE_Int i;
   hypre_Vector **x_local;
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   x_local = hypre_TAlloc(hypre_Vector *, k, HYPRE_MEMORY_HOST);

   for (i=0; i < k; i++)
   {
      x_local[i] = hypre_ParVectorLocalVector(x[i]);
   }

   hypre_SeqVectorMassAxpy( alpha, x_local, y_local, k, unroll);

   hypre_TFree(x_local, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInnerProd
 *--------------------------------------------------------------------------*/

/* Global inner product <x,y>: local dot product followed by an Allreduce
 * over the vector's communicator. */
HYPRE_Real
hypre_ParVectorInnerProd( hypre_ParVector *x,
                          hypre_ParVector *y )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);

   HYPRE_Real result = 0.0;
   HYPRE_Real local_result = hypre_SeqVectorInnerProd(x_local, y_local);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime();
#endif
   hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime();
#endif

   return result;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorMassInnerProd
 *--------------------------------------------------------------------------*/

/* result[i] = <x, y[i]> for i in [0,k): one local mass dot product, then a
 * single Allreduce of all k partial sums. */
HYPRE_Int
hypre_ParVectorMassInnerProd( hypre_ParVector *x,
                              hypre_ParVector **y,
                              HYPRE_Int k,
                              HYPRE_Int unroll,
                              HYPRE_Real *result )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   HYPRE_Real *local_result;
   HYPRE_Int i;
   hypre_Vector **y_local;
   y_local = hypre_TAlloc(hypre_Vector *, k, HYPRE_MEMORY_HOST);

   for (i=0; i < k; i++)
   {
      y_local[i] = (hypre_Vector *) hypre_ParVectorLocalVector(y[i]);
   }

   local_result = hypre_CTAlloc(HYPRE_Real, k, HYPRE_MEMORY_HOST);

   hypre_SeqVectorMassInnerProd(x_local, y_local, k, unroll, local_result);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime();
#endif
   hypre_MPI_Allreduce(local_result, result, k, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime();
#endif

   hypre_TFree(y_local, HYPRE_MEMORY_HOST);
   hypre_TFree(local_result, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorMassDotpTwo
 *--------------------------------------------------------------------------*/

/* result_x[i] = <x, z[i]>, result_y[i] = <y, z[i]> for i in [0,k): both sets
 * of partial sums packed into one buffer so a single Allreduce suffices. */
HYPRE_Int
hypre_ParVectorMassDotpTwo ( hypre_ParVector *x, hypre_ParVector *y,
                             hypre_ParVector **z, HYPRE_Int k,
                             HYPRE_Int unroll,
                             HYPRE_Real *result_x, HYPRE_Real *result_y )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   HYPRE_Real *local_result, *result;
   HYPRE_Int i;
   hypre_Vector **z_local;
   z_local = hypre_TAlloc(hypre_Vector*, k, HYPRE_MEMORY_HOST);

   for (i=0; i < k; i++)
   {
      z_local[i] = (hypre_Vector *) hypre_ParVectorLocalVector(z[i]);
   }

   /* layout: [0,k) holds x-dots, [k,2k) holds y-dots */
   local_result = hypre_CTAlloc(HYPRE_Real, 2*k, HYPRE_MEMORY_HOST);
   result = hypre_CTAlloc(HYPRE_Real, 2*k, HYPRE_MEMORY_HOST);

   hypre_SeqVectorMassDotpTwo(x_local, y_local, z_local, k, unroll,
                              &local_result[0], &local_result[k]);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime();
#endif
   hypre_MPI_Allreduce(local_result, result, 2*k, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime();
#endif

   for (i=0; i < k; i++)
   {
      result_x[i] = result[i];
      result_y[i] = result[k+i];
   }

   hypre_TFree(z_local, HYPRE_MEMORY_HOST);
   hypre_TFree(local_result, HYPRE_MEMORY_HOST);
   hypre_TFree(result, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_VectorToParVector:
 * generates a ParVector from a Vector on proc 0 and distributes the pieces
 * to the other procs in comm
 *
 * the length of vec_starts depends on HYPRE_NO_GLOBAL_PARTITION
 *--------------------------------------------------------------------------*/

/* Scatter a sequential vector living on rank 0 into a new ParVector.
 * Rank 0 sends each remote rank its slice (tag 0); other ranks Recv.
 * NOTE(review): global_size is HYPRE_BigInt but is broadcast with datatype
 * HYPRE_MPI_INT — suspect when hypre is built with 64-bit big ints; confirm
 * against HYPRE_MPI_BIG_INT usage elsewhere.
 * NOTE(review): rank 0 posts num_vectors*(num_procs-1) Isends but waits on
 * only num_procs-1 requests — looks wrong for num_vectors > 1; verify. */
hypre_ParVector *
hypre_VectorToParVector ( MPI_Comm comm,
                          hypre_Vector *v,
                          HYPRE_BigInt *vec_starts )
{
   HYPRE_BigInt        global_size;
   HYPRE_BigInt       *global_vec_starts = NULL;
   HYPRE_BigInt        first_index;
   HYPRE_BigInt        last_index;
   HYPRE_Int           local_size;
   HYPRE_Int           num_vectors;
   HYPRE_Int           num_procs, my_id;
   HYPRE_Int           global_vecstride, vecstride, idxstride;
   hypre_ParVector    *par_vector;
   hypre_Vector       *local_vector;
   HYPRE_Complex      *v_data;
   HYPRE_Complex      *local_data;
   hypre_MPI_Request  *requests;
   hypre_MPI_Status   *status, status0;
   HYPRE_Int           i, j, k, p;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (my_id == 0)
   {
      global_size = (HYPRE_BigInt)hypre_VectorSize(v);
      v_data = hypre_VectorData(v);
      num_vectors = hypre_VectorNumVectors(v); /* for multivectors */
      global_vecstride = hypre_VectorVectorStride(v);
   }

   hypre_MPI_Bcast(&global_size,1,HYPRE_MPI_INT,0,comm);
   hypre_MPI_Bcast(&num_vectors,1,HYPRE_MPI_INT,0,comm);
   hypre_MPI_Bcast(&global_vecstride,1,HYPRE_MPI_INT,0,comm);

   if ( num_vectors == 1 )
      par_vector = hypre_ParVectorCreate(comm, global_size, vec_starts);
   else
      par_vector = hypre_ParMultiVectorCreate(comm, global_size, vec_starts, num_vectors);

   /* re-fetch: Create may have generated the partitioning itself */
   vec_starts = hypre_ParVectorPartitioning(par_vector);

   first_index = hypre_ParVectorFirstIndex(par_vector);
   last_index = hypre_ParVectorLastIndex(par_vector);
   local_size = (HYPRE_Int)(last_index - first_index) + 1;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* rank 0 needs the full set of start indices to slice v; gather them */
   if (my_id == 0)
   {
      global_vec_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   }
   hypre_MPI_Gather(&first_index, 1, HYPRE_MPI_BIG_INT, global_vec_starts,
                    1, HYPRE_MPI_BIG_INT, 0, comm);
   if (my_id == 0)
   {
      global_vec_starts[num_procs] = hypre_ParVectorGlobalSize(par_vector);
   }
#else
   global_vec_starts = vec_starts;
#endif

   hypre_ParVectorInitialize(par_vector);
   local_vector = hypre_ParVectorLocalVector(par_vector);
   local_data = hypre_VectorData(local_vector);
   vecstride = hypre_VectorVectorStride(local_vector);
   idxstride = hypre_VectorIndexStride(local_vector);
   /* so far the only implemented multivector StorageMethod is 0 */
   hypre_assert( idxstride==1 );

   if (my_id == 0)
   {
      requests = hypre_CTAlloc(hypre_MPI_Request, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST);
      k = 0;
      for (p = 1; p<num_procs; p++)
         for (j = 0; j<num_vectors; ++j)
         {
            hypre_MPI_Isend( &v_data[(HYPRE_Int) global_vec_starts[p]] + j*global_vecstride,
                             (HYPRE_Int)(global_vec_starts[p+1] - global_vec_starts[p]),
                             HYPRE_MPI_COMPLEX, p, 0, comm, &requests[k++] );
         }
      if (num_vectors == 1)
      {
         for (i = 0; i < local_size; i++)
            local_data[i] = v_data[i];
      }
      else
      {
         for (j = 0; j<num_vectors; ++j)
         {
            for (i = 0; i < local_size; i++)
               local_data[i+j*vecstride] = v_data[i+j*global_vecstride];
         }
      }
      hypre_MPI_Waitall(num_procs-1,requests, status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }
   else
   {
      for ( j=0; j<num_vectors; ++j )
         hypre_MPI_Recv( local_data+j*vecstride, local_size, HYPRE_MPI_COMPLEX,
                         0, 0, comm,&status0 );
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   if (global_vec_starts)
   {
      hypre_TFree(global_vec_starts, HYPRE_MEMORY_HOST);
   }
#endif

   return par_vector;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorToVectorAll:
 * generates a Vector on every proc which has a piece of the data
 * from a ParVector on several procs in comm,
 * vec_starts needs to contain the partitioning across all procs in comm
 *--------------------------------------------------------------------------*/

/* Gather a distributed ParVector into a full-length sequential Vector on
 * every rank that holds data; ranks with no local data return NULL.
 * In the NO_GLOBAL_PARTITION build the participating ranks and their start
 * indices are first discovered via hypre_DataExchangeList + a broadcast from
 * rank 0 (tag1), then all-to-all Isend/Irecv of the pieces (tag2). */
hypre_Vector *
hypre_ParVectorToVectorAll( hypre_ParVector *par_v )
{
   MPI_Comm             comm = hypre_ParVectorComm(par_v);
   HYPRE_BigInt         global_size = hypre_ParVectorGlobalSize(par_v);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt        *vec_starts = hypre_ParVectorPartitioning(par_v);
#endif
   hypre_Vector        *local_vector = hypre_ParVectorLocalVector(par_v);
   HYPRE_Int            num_procs, my_id;
   HYPRE_Int            num_vectors = hypre_ParVectorNumVectors(par_v);
   hypre_Vector        *vector;
   HYPRE_Complex       *vector_data;
   HYPRE_Complex       *local_data;
   HYPRE_Int            local_size;
   hypre_MPI_Request   *requests;
   hypre_MPI_Status    *status;
   HYPRE_Int            i, j;
   HYPRE_Int           *used_procs;
   HYPRE_Int            num_types, num_requests;
   HYPRE_Int            vec_len, proc_id;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int *new_vec_starts;

   HYPRE_Int num_contacts;
   HYPRE_Int contact_proc_list[1];
   HYPRE_Int contact_send_buf[1];
   HYPRE_Int contact_send_buf_starts[2];
   HYPRE_Int max_response_size;
   HYPRE_Int *response_recv_buf=NULL;
   HYPRE_Int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;

   HYPRE_Int *send_info = NULL;
   hypre_MPI_Status  status1;
   HYPRE_Int count, tag1 = 112, tag2 = 223;
   HYPRE_Int start;
#endif

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION

   local_size = (HYPRE_Int)(hypre_ParVectorLastIndex(par_v) -
      hypre_ParVectorFirstIndex(par_v) + 1);

   /* determine procs which hold data of par_v and store ids in used_procs */
   /* we need to do an exchange data for this.  If I own row then I will contact
      processor 0 with the endpoint of my local range */

   if (local_size > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] = hypre_ParVectorLastIndex(par_v);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }

   /*build the response object*/
   /*send_proc_obj will be for saving info from contacts */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements =
      hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);

   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToVectorAll;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/

   hypre_DataExchangeList(num_contacts, contact_proc_list,
                          contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int),
                          //0, &response_obj,
                          sizeof(HYPRE_Int), &response_obj,
                          max_response_size, 1,
                          comm, (void**) &response_recv_buf,
                          &response_recv_buf_starts);

   /* now processor 0 should have a list of ranges for processors that have rows -
      these are in send_proc_obj - it needs to create the new list of processors
      and also an array of vec starts - and send to those who own row*/
   if (my_id)
   {
      if (local_size)
      {
         /* look for a message from processor 0 */
         hypre_MPI_Probe(0, tag1, comm, &status1);
         hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);

         send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
         hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);

         /* now unpack: [num_types | used_procs... | new_vec_starts...] */
         num_types = send_info[0];
         used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
         new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);

         for (i=1; i<= num_types; i++)
         {
            used_procs[i-1] = (HYPRE_Int)send_info[i];
         }
         for (i=num_types+1; i< count; i++)
         {
            new_vec_starts[i-num_types-1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
         if(response_recv_buf)        hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
         if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      num_types = send_proc_obj.length;
      used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);

      new_vec_starts[0] = 0;
      for (i=0; i< num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         /* contacts carried each rank's last index; +1 turns it into the
            next rank's start */
         new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
      }
      hypre_qsort0(used_procs, 0, num_types-1);
      hypre_qsort0(new_vec_starts, 0, num_types);
      /*now we need to put into an array to send */
      count =  2*num_types+2;
      send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
      send_info[0] = num_types;
      for (i=1; i<= num_types; i++)
      {
         send_info[i] = (HYPRE_Int)used_procs[i-1];
      }
      for (i=num_types+1; i< count; i++)
      {
         send_info[i] = new_vec_starts[i-num_types-1];
      }
      requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);

      /* don't send to myself  - these are sorted so my id would be first*/
      start = 0;
      if (used_procs[0] == 0)
      {
         start = 1;
      }

      for (i=start; i < num_types; i++)
      {
         hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i],
                         tag1, comm, &requests[i-start]);
      }
      hypre_MPI_Waitall(num_types-start, requests, status);

      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }

   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_info, HYPRE_MEMORY_HOST);
   if(response_recv_buf)        hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
   if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);

   /* now proc 0 can exit if it has no rows */
   if (!local_size)
   {
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
      hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
      return NULL;
   }

   /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */

   /* this vector should be rather small */

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate((HYPRE_Int)global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   num_requests = 2*num_types;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);

   /* initialize data exchange among used_procs and generate vector  - here we
      send to ourself also*/

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]);
      hypre_MPI_Irecv(&vector_data[(HYPRE_Int)new_vec_starts[i]], num_vectors*vec_len,
                      HYPRE_MPI_COMPLEX, proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag2, comm, &requests[j++]);
   }

   hypre_MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
   }

   hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);

#else

   local_size = (HYPRE_Int)(vec_starts[my_id+1] - vec_starts[my_id]);

   /* if my_id contains no data, return NULL */
   if (!local_size)
      return NULL;

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   /* determine procs which hold data of par_v and store ids in used_procs */
   /* NOTE(review): num_types starts at -1 so that the count excludes one rank
      (self, which is known to have data) — confirm intent. */
   num_types = -1;
   for (i=0; i < num_procs; i++)
      if (vec_starts[i+1]-vec_starts[i])
         num_types++;
   num_requests = 2*num_types;

   used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
   j = 0;
   for (i=0; i < num_procs; i++)
      if (vec_starts[i+1]-vec_starts[i] && i-my_id)
         used_procs[j++] = i;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);

   /* initialize data exchange among used_procs and generate vector */

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (HYPRE_Int)(vec_starts[proc_id+1] - vec_starts[proc_id]);
      hypre_MPI_Irecv(&vector_data[vec_starts[proc_id]], num_vectors*vec_len,
                      HYPRE_MPI_COMPLEX, proc_id, 0, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, HYPRE_MPI_COMPLEX,
                      used_procs[i], 0, comm, &requests[j++]);
   }

   /* local piece is copied directly, not sent */
   for (i=0; i < num_vectors*local_size; i++)
      vector_data[vec_starts[my_id]+i] = local_data[i];

   hypre_MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }

#endif

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorPrintIJ
 *--------------------------------------------------------------------------*/

/* Write the vector in IJ format ("index value" per row, base_j offset) to
 * per-rank files "<filename>.<rank>".  Single-vector only.
 * NOTE(review): part0 is declared HYPRE_Int while partitioning entries are
 * HYPRE_BigInt — possible truncation for very large global sizes; confirm. */
HYPRE_Int
hypre_ParVectorPrintIJ( hypre_ParVector *vector,
                        HYPRE_Int base_j,
                        const char *filename )
{
   MPI_Comm          comm;
   HYPRE_BigInt      global_size, j;
   HYPRE_BigInt     *partitioning;
   HYPRE_Complex    *local_data;
   HYPRE_Int         myid, num_procs, i, part0;
   char              new_filename[255];
   FILE             *file;

   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm         = hypre_ParVectorComm(vector);
   global_size  = hypre_ParVectorGlobalSize(vector);
   partitioning = hypre_ParVectorPartitioning(vector);

   /* multivector code not written yet */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error_in_arg(1);

   hypre_MPI_Comm_rank(comm, &myid);
   hypre_MPI_Comm_size(comm, &num_procs);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
      return hypre_error_flag;
   }

   local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector));

   hypre_fprintf(file, "%b \n", global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=0; i < 2; i++)
   {
      hypre_fprintf(file, "%b ", partitioning[i] + base_j);
   }
#else
   for (i=0; i <= num_procs; i++)
   {
      hypre_fprintf(file, "%b ", partitioning[i] + base_j);
   }
#endif
   hypre_fprintf(file, "\n");

#ifdef HYPRE_NO_GLOBAL_PARTITION
   part0 = partitioning[0];
   for (j = part0; j < partitioning[1]; j++)
   {
      hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]);
   }
#else
   part0 = partitioning[myid];
   for (j = part0; j < partitioning[myid+1]; j++)
   {
      hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]);
   }
#endif

   fclose(file);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorReadIJ
 * Warning: wrong base for assumed partition if base > 0
 *--------------------------------------------------------------------------*/

/* Read a vector written by hypre_ParVectorPrintIJ from "<filename>.<rank>".
 * Returns the detected index base in *base_j_ptr and the vector in
 * *vector_ptr.  Single-vector only.
 * NOTE(review): in the NO_GLOBAL_PARTITION branch, partitioning[0] is read
 * once before the loop and then again inside it — three reads against the
 * two values PrintIJ writes; verify against the file format. */
HYPRE_Int
hypre_ParVectorReadIJ( MPI_Comm comm,
                       const char *filename,
                       HYPRE_Int *base_j_ptr,
                       hypre_ParVector **vector_ptr )
{
   HYPRE_BigInt      global_size, J;
   hypre_ParVector  *vector;
   hypre_Vector     *local_vector;
   HYPRE_Complex    *local_data;
   HYPRE_BigInt     *partitioning;
   HYPRE_Int         base_j;

   HYPRE_Int         myid, num_procs, i, j;
   char              new_filename[255];
   FILE             *file;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%b", &global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* this may need to be changed so that the base is available in the file! */
   partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   hypre_fscanf(file, "%b", partitioning);
   for (i = 0; i < 2; i++)
   {
      hypre_fscanf(file, "%b", partitioning+i);
   }
   /* This is not yet implemented correctly! */
   base_j = 0;
#else
   partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);

   hypre_fscanf(file, "%b", partitioning);
   for (i = 1; i <= num_procs; i++)
   {
      hypre_fscanf(file, "%b", partitioning+i);
      /* shift so partitioning becomes 0-based; partitioning[0] held the base */
      partitioning[i] -= partitioning[0];
   }
   base_j = (HYPRE_Int)partitioning[0];
   partitioning[0] = 0;
#endif
   vector = hypre_ParVectorCreate(comm, global_size, partitioning);

   hypre_ParVectorInitialize(vector);

   local_vector = hypre_ParVectorLocalVector(vector);
   local_data   = hypre_VectorData(local_vector);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (j = 0; j < (HYPRE_Int)(partitioning[1] - partitioning[0]); j++)
   {
      hypre_fscanf(file, "%b %le", &J, local_data + j);
   }
#else
   for (j = 0; j < (HYPRE_Int)(partitioning[myid+1] - partitioning[myid]); j++)
   {
      hypre_fscanf(file, "%b %le", &J, local_data + j);
   }
#endif

   fclose(file);

   *base_j_ptr = base_j;
   *vector_ptr = vector;

   /* multivector code not written yet */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error(HYPRE_ERROR_GENERIC);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------
 * hypre_FillResponseParToVectorAll
 * Fill response function for determining the send processors
 * data exchange
 *--------------------------------------------------------------------*/

/* hypre_DataExchangeList callback: append the contacting proc's id and its
 * payload (its last row index) to the send_proc_obj lists stored in
 * response_obj->data2, growing the arrays as needed.  Sends no reply
 * (*response_message_size = 0). */
HYPRE_Int
hypre_FillResponseParToVectorAll( void       *p_recv_contact_buf,
                                  HYPRE_Int   contact_size,
                                  HYPRE_Int   contact_proc,
                                  void       *ro,
                                  MPI_Comm    comm,
                                  void      **p_send_response_buf,
                                  HYPRE_Int  *response_message_size )
{
   HYPRE_Int     myid;
   HYPRE_Int     i, index, count, elength;

   HYPRE_BigInt    *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;

   hypre_DataExchangeResponse  *response_obj = (hypre_DataExchangeResponse*)ro;

   hypre_ProcListElements      *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;

   hypre_MPI_Comm_rank(comm, &myid );

   /*check to see if we need to allocate more space in send_proc_obj for ids*/
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length +=10; /*add space for 10 more processors*/
      send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                         send_proc_obj->storage_length, HYPRE_MEMORY_HOST);
      send_proc_obj->vec_starts =
         hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                        send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
   }

   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/

   /*send proc*/
   send_proc_obj->id[count] = contact_proc;

   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      elength = hypre_max(contact_size, 10);
      elength += index;
      send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements,
                                               HYPRE_BigInt, elength, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }
   /*populate send_proc_obj*/
   for (i=0; i< contact_size; i++)
   {
      send_proc_obj->elements[index++] = recv_contact_buf[i];
   }
   send_proc_obj->vec_starts[count+1] = index;
   send_proc_obj->length++;

   /*output - no message to return (confirmation) */
   *response_message_size = 0;

   return hypre_error_flag;
}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the vector
 * ----------------------------------------------------------------------------- */

/* Sum of the LOCAL elements only — no reduction across ranks. */
HYPRE_Complex
hypre_ParVectorLocalSumElts( hypre_ParVector * vector )
{
   return hypre_SeqVectorSumElts( hypre_ParVectorLocalVector(vector) );
}

/*
#ifdef HYPRE_USING_UNIFIED_MEMORY
hypre_int hypre_ParVectorIsManaged(hypre_ParVector *vector){
  if (vector==NULL) return 1;
  return hypre_SeqVectorIsManaged(hypre_ParVectorLocalVector(vector));
}
#endif
*/

/* Copy num_values entries of the local part into values.  If indices is
 * non-NULL they are GLOBAL indices, each of which must lie within this
 * rank's [first_index, last_index] range; otherwise the first num_values
 * local entries are copied.  Requires the local vector to own its data. */
HYPRE_Int
hypre_ParVectorGetValues(hypre_ParVector *vector,
                         HYPRE_Int num_values,
                         HYPRE_BigInt *indices,
                         HYPRE_Complex *values)
{
   HYPRE_Int i, j;
   HYPRE_BigInt first_index, last_index, index;
   hypre_Vector *local_vector;
   HYPRE_Complex *data;

   first_index  = hypre_ParVectorFirstIndex(vector);
   last_index   = hypre_ParVectorLastIndex(vector);
   local_vector = hypre_ParVectorLocalVector(vector);
   data         = hypre_VectorData(local_vector);

   if (hypre_VectorOwnsData(local_vector) == 0)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Vector does not own data! -- hypre_ParVectorGetValues.");
      return hypre_error_flag;
   }

   if (indices)
   {
      /* validate all indices up front so no partial copy happens on error */
      for (i=0; i < num_values; i++)
      {
         index = indices[i];
         if (index < first_index || index > last_index)
         {
            hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Index out of range! -- hypre_ParVectorGetValues.");
            return hypre_error_flag;
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
      {
         i = (HYPRE_Int)(indices[j] - first_index);
         values[j] = data[i];
      }
   }
   else
   {
      if (num_values > hypre_VectorSize(local_vector))
      {
         hypre_error_in_arg(2);
         return hypre_error_flag;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         values[j] = data[j];
   }

   return hypre_error_flag;
}
gather_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: jxyang@openailab.com
 * Update: hhchen@openailab.com
 */

#include "gather_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>

/* Per-node private state, filled in prerun()/run() from the IR tensors. */
typedef struct
{
    int* in_shape;   /* input tensor shape; allocated in prerun(), freed in postrun() */
    int axis;        /* axis along which to gather */
    int indices_num; /* number of indices to gather */
    int dim_size;    /* rank of the input tensor */
    int is_onnx;     /* non-zero when the node came from an ONNX model */
} gather_param_t;

/* Gather slices along 'axis' from an fp32 tensor:
 * output[outer, i, inner] = input[outer, indices[i], inner]. */
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
    float* out_ptr = output;
    float* in_ptr = input;

    int axis = param->axis;
    int outer_size = 1; /* product of dims before 'axis' */
    int inner_size = 1; /* product of dims after 'axis'  */
    int axis_size = param->in_shape[axis];

    for (int i = 0; i < axis; i++)
    {
        outer_size *= param->in_shape[i];
    }
    for (int i = axis + 1; i < param->dim_size; i++)
    {
        inner_size *= param->in_shape[i];
    }

    if (param->is_onnx)
    {
        /* NOTE(review): the ONNX path uses param->indices_num itself as the
         * (single) gather position and copies one inner slice per outer
         * block; this only makes sense for a scalar index.  Confirm against
         * the ONNX importer before changing. */
        for (int outer = 0; outer < outer_size; ++outer)
        {
            memcpy(out_ptr + (outer * param->indices_num) * inner_size,
                   in_ptr + (outer * axis_size + param->indices_num) * inner_size,
                   inner_size * sizeof(float));
        }
    }
    else
    {
        for (int outer = 0; outer < outer_size; ++outer)
        {
            for (int i = 0; i < param->indices_num; i++)
            {
                memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
                       in_ptr + (outer * axis_size + (int)input_indices[i]) * inner_size,
                       inner_size * sizeof(float));
            }
        }
    }

    return 0;
}

/* Gather slices along 'axis' from a uint8 (quantized) tensor; element size
 * is one byte so the inner slice is inner_size bytes. */
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
    uint8_t* out_ptr = output;
    uint8_t* in_ptr = input;

    int axis = param->axis;
    int outer_size = 1;
    int inner_size = 1;
    int axis_size = param->in_shape[axis];

    for (int i = 0; i < axis; i++)
    {
        outer_size *= param->in_shape[i];
    }
    for (int i = axis + 1; i < param->dim_size; i++)
    {
        inner_size *= param->in_shape[i];
    }

    for (int outer = 0; outer < outer_size; ++outer)
    {
        for (int i = 0; i < param->indices_num; i++)
        {
            memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
                   in_ptr + (outer * axis_size + (int)input_indices[i]) * inner_size,
                   inner_size);
        }
    }

    return 0;
}

/* Copy the op parameters into the private state and allocate the shape
 * cache (released in postrun()). */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct gather_param* gather_param = (struct gather_param*)ir_node->op.param_mem;
    gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;

    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    op_priv_info->axis = gather_param->axis;
    op_priv_info->indices_num = gather_param->indices_num;
    op_priv_info->is_onnx = gather_param->is_onnx;

    /* BUG FIX: check the allocation instead of using it blindly in run() */
    op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num * sizeof(int));
    if (op_priv_info->in_shape == NULL)
    {
        return -1;
    }

    return 0;
}

/* Refresh the cached shape and dispatch to the dtype-specific kernel. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);

    gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;

    void* input = input_tensor->data;
    void* indices_data = indices_tensor->data;

    /* re-read the dims each run: the graph may have been reshaped */
    op_priv_info->dim_size = input_tensor->dim_num;
    for (int i = 0; i < op_priv_info->dim_size; i++)
    {
        op_priv_info->in_shape[i] = input_tensor->dims[i];
    }

    void* output = output_tensor->data;

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_gather_fp32(input, indices_data, output, op_priv_info, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_gather_uint8(input, indices_data, output, op_priv_info, exec_graph->num_thread);

    return ret;
}

/* Allocate and zero the private state attached to the exec node. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_priv_info = (gather_param_t*)sys_malloc(sizeof(gather_param_t));
    if (op_priv_info == NULL)
    {
        return -1;
    }

    memset(op_priv_info, 0, sizeof(gather_param_t));
    exec_node->ops_priv = op_priv_info;

    return 0;
}

/* Release the shape cache allocated in prerun(). */
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_param = (gather_param_t*)exec_node->ops_priv;

    sys_free(op_param->in_shape);
    op_param->in_shape = NULL; /* guard against double free / stale pointer */

    return 0;
}

/* Release the private state allocated in init_node(). */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;

    sys_free(op_priv_info);
    exec_node->ops_priv = NULL;

    return 0;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_BEST;
}

/* BUG FIX: .postrun was NULL, so the in_shape buffer allocated in prerun()
 * leaked on every graph teardown; register the existing postrun(). */
static struct node_ops gather_node_ops = {.prerun = prerun,
                                          .run = run,
                                          .reshape = NULL,
                                          .postrun = postrun,
                                          .init_node = init_node,
                                          .release_node = release_node,
                                          .score = score};

int register_gather_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}

int unregister_gather_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
opencl_electrum_modern_fmt_plug.c
/* * This software is Copyright (c) 2017 Dhiru Kholia and it is hereby released * to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Based on opencl_pbkdf2_hmac_sha512_fmt_plug.c file. */ #include "arch.h" #if !AC_BUILT #define HAVE_LIBZ 1 #endif #if HAVE_LIBZ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_electrum_modern; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_electrum_modern); #else #include <stdint.h> #include <string.h> #include <zlib.h> #include <openssl/bn.h> #ifdef _OPENMP #include <omp.h> #endif #include "misc.h" #include "arch.h" #include "common.h" #include "formats.h" #include "options.h" #include "common-opencl.h" #include "johnswap.h" #include "secp256k1.h" #include "aes.h" #include "sha2.h" #include "hmac_sha.h" #include "pbkdf2_hmac_common.h" #undef FORMAT_NAME #define FORMAT_NAME "Electrum Wallet 2.8+" #define FORMAT_LABEL "electrum-modern-opencl" #define FORMAT_TAG "$electrum$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "PBKDF2-SHA512 OpenCL" #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 110 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define KERNEL_NAME "pbkdf2_sha512_kernel" #define SPLIT_KERNEL_NAME "pbkdf2_sha512_loop" #define CONFIG_NAME "pbkdf2_sha512" #define HASH_LOOPS 250 #define ITERATIONS 10000 static struct fmt_tests electrum_tests[] = { // Electrum 2.8.0+ encrypted wallets 
{"$electrum$4*03c2a94eb01e9453c24c9bf49102356788673cc26fbe27b9bf54b0f150758c7864*4249453103c2a94eb01e9453c24c9bf49102356788673cc26fbe27b9bf54b0f150758c7864355ed45b963901b56cd6c483468247c7c8c76ba11c9cb94633575838cffb8f0cebfc9af91ba402c06cca5c08238c643a0291e66e1a849eb66a9eda17e1496d09f46bfe6f63bfdcd591c260f31b92bd5958ce85c7719983a7395c88570946a59d5dcc2188680aba439cde0dbdfeaba985fe3d1a97d25b81573a92f72aea8c60fa3a4228acb789d7f307f6a19d1025fa6ac81d91d45ef07c0b26d9f85fc6ba07246b8b19d641929aac16ff1c942a3d69b824e3e39a122402aed63d3d12ca299416500459e7353bd56db92102c93f045ccc719cee90d2f891ff6b128886ec90768364bcc89c3393f21a5b57915f4eaf4e3b9c7a3958124b43956a47572ae38df2a11b84f6dc25ddc3d3b1968e3adadc756507118301e8cc490d249dc603f4f46c3bf0b214fd3bfb8dab6f048ba7d60dbee031d386a5aeec6664d2891abbeb0201b437d6e37c140be3e6210078e76afafbd78a8acaf45f21cf83c69218f9bfd3abb0211d57ab1874e9d645171cdaad4887a9fea86003b9948d22d9e7bfaec4c4bd0786cd4d191c82c61e83c61bae06a7c9936af46f8fa121ab696aba24ad8fd8f69537aa713bf271e4be567e7e3ccd141511c96ce634175f845ff680f71bbd595ef5d45d9cfd9a7e099fbab7964add7a76c4820b20952121e5621cb53c9476dc23860a5bc4ba3ecf636dc224503202dc11bf3bc88c70dcc2005684f7d3ebe6a7ea1487423a5145442f8f3d806d5d219560b4bce272ef9d6e32849b692cd91d4c60462b0f813603a52dc84b959051e787d890661e9f439a11fa8819c4fb947ff8dd0a5b7e5e63605f4e9f6eac6f8b2bfd7a9098dd2201c2f4cdaa2d7d0691ccf42b2761a8bb2a08c755077a753a41bcf305c83da8cd9ebaeee0360afb4be00827e167b2c1a3d5975d3a4a1e3b3b56794a155253437710ee3c0d0a2de0c4d631b48808fa946146f09e8ea9888d6c6bad104ebed814e79bdc26be38e8580d8fff6324405c128627079d1e3bafc2479274a3bc4f8196e923c835204e91ce8a9cb235c5349056415ad58a83b41254eda57839cd2e0bb66f125e32c76671f6447b2b0321d021c60706ff6f103ce483986fe0f1cc62307f6a1e89c4b2f334fc6f1f2597f5d68b3948c7655025a04ea858bc33eb341de09bdb4862701abcbc4c907270856de6072ee8d0c9e46e19c50eac454d4ca5fcd1a35f5d239aadc82543deafcd17f0eae2145561b8834dd80d337c574d3e931365db294d66aa4b47669f92784325b85abae49a8447a2afeb4cac460cba2a9d7b298bd3f69ac31862b92a9
70ed8d3241227858b0c40b2f6793cdd733020987beb7e6f01826fa2dae2b345f4e8e96da885a00901b20308f37c8613cf28ef997a6f25c741af917a547b38cff7577d2cac2654d5cdac2d0f1135ac6db3d70174b03c4149d134325f1b805ef11cd62531c13436ad1c7cb73f488dc411d349be34523d477953e8b47848e31ec85230a99ecd88c9cbc5d33de132aacd04877123cff599bea3b2e7b931347673cca605b3bc129496d5e80b06ae0eb3fce5c24ea0f8d2ecd4cfb9ed5034b26ed18b564731c78f5344ec863bd78797ad7de722c7a88e047af0364f69a303dc5f716ebda1de9ca21cb49e4091cb975c17f098932e884f36bded1fab34814931b0aeb72b1bc90747f7f5ebe73c547681f7a8d6d74e7acde2ba6e5e998bd6b035ade5fa64171dde4a82ed5ed7f273220d47bbd5a1c2ed4359d02392b746ba653d1c30f63bce161d0555ebc4775262036be51d4a50113bbac6823fd6a0d387a32673dc454c4d9d018cc25885a0d15d3f7488bbe18398d758cbbf1a24eaf71bd1560ff216e342e09efdbfae2872cfdf59ed802420ba8522edfd74f6d728ffa1683e586b53cbec80f00be6478a44d8df1c69a5cdbb50aa75da2f2dd0a679b037b4173f20b9514064d15ff50f1e9beb0112a41cdc0ecf7fb3028fe6f4c7339bb79d50cb7d43cabd8ae198741677d41e411c811c6267e9b4e41d944b035e47406d5120f1ee192db810cf6774*40c7a179573d57c54d0da0a1c4d71e306e1eea823f637f29c3e43b9792469d15", "openwall123"}, 
{"$electrum$4*0328e536dd1fbbb85d78de1a8c21215d4646cd87d6b6545afcfb203e5bb32e0de4*424945310328e536dd1fbbb85d78de1a8c21215d4646cd87d6b6545afcfb203e5bb32e0de461b1e287a5acff4b40e4abd73ff62dc233c1c7a6a54b3270949281b9d44bc6e746743733360500718826e50bb28ea99a6378dc0b0c578e9d0bf09c667671c82a1bd71c8121edbb4c9cbca93ab0e17e218558ead81755e62b0d4ad547aa1b3beb0b9ee43b11270261c9b38502f00e7f6f096811b7fdae6f3dce85c278d3751fec044027054218ccf20d404bab24380b303f094704e626348a218f44ab88ce2ac5fa7d450069fca3bb53f9359dbbaad0ea1b3859129b19c93ed7888130f8a534f84a629c67edc150a1c5882a83cb0add4615bb569e8dc471de4d38fc8b1e0b9b28040b5ea86093fcdeceaedb6b8f073f6f0ee5541f473a4b1c2bfae4fc91e4bbb40fa2185ecfa4c72010bcf8df05b1a7db45f64307dbc439f8389f0e368e38960b6d61ac88c07ce95a4b03d6d8b13f4c7dc7d7c447097865235ab621aeef38dc4172bf2dc52e701132480127be375fe98834f16d9895dce7f6cdfe900a2ce57eaa6c3036c1b9a661c3c9adbf84f4adfe6d4d9fa9f829f2957cfb353917dc77fd8dd4872b7d90cb71b7d3a29c9bfe3440e02449220acba410fa0af030f51aa2438f7478dbb277d62613112e4eebc66d5d7bdba793fb2073d449954f563284819189ffb5dbcdeb6c95c64bc24e0ef986bce07bafe96ab449ae2b6edaf4f98ffbd392a57bd93c2359444ec4046ae65b440adb96b6e4eef9d06bb04d2f3fa2e4175165bcadbf7e13cc3b6e65e67df901f96a2f154bc763b56b3736a335e1d1bc16e99736f757a4ae56c099645c917360b1ecf8dcefc7281541c6ff65d87cadab4a48f1f6b7b73a3e5a67e2e032abb56b499e73a9f3b69ce065e43b0174639785ae30635d105ebcc827dcf9b19bdd1a92879a5d4bc4e12b5630c188b1b96e3c586e19901b8f96084bcd59b2f4b201a3a8b6e633a5c194901d4609add9671b0bcc12b2b94ae873d201258b36315484e4b9c5f5d6289656baa93eec9e92aec88e2d73d86b9e3d1f24294e3d8ebe9a9f2f6edfbf28f530670c5b086fc4f74df89b4e4cbe06ee7e45cbd238b599d19c2d5da5523b12b1e7050ea0a9b47a5d22c6c3fc476f814f9705dc7ed3aeb1b44fc6b4d69f02a74963dce5057c3c049f92e595a4da5035cffc303a4cb162803aa3f816527a7e466b8424789a0d77e26819615662420c370457e29fcc1938fd754f3acfd21416ce3ab27e9febbc0e24fc7055eddc31e48faa014f9f3695c2e956f0e6c94c507a8d2f8c3aeb4b98b69b6340b6a3acb1acdde9581279f78ee10687616360c018e9f67d6c8bb5950e8fdabd3d0
d5808824975aa4a50f88581472212f24ad58a700fe4787642b973924575fe71d1ecd7b2b6acd363f48c40bdd55f35f60a06dee544c266e608fd5a6d263f745e8b11d1160638eb301adfd1a88eddf6d0ccb9e1021e0bde9cf5163583a202b3dc95c255c8cc245a425391163b387c5312d07637272621b94bfde151238c5f55371774ca07603fe3e0a43e92b5cf46096c4c8014e03e730555b61bb544a3998ccd8e45e0f9427c66ce1da1e8cc86d5414fe0d0d49d0a048fb55b76eb3a0a0ba2f1a94227eb6b7b58ff3d410bcd782970689dd026350cbde243de749c27f4647ede96996767354aaf14e336bec47b7498774a519d999f15d424ab34c05254ac835c6df8482c3b6e72b879205392f02f2a666185250ab3b0dd70d219de936495f873b3fe8722026b167437d5fc8fd21aa67ba642da8ca68a5823bc8f6da6fd1a50996a3e4d9fb2bd15909a91f217c512561a502d26c4f0baa0145b4acbcdea8adecbeaeff956e0ec6ae77d35872d2d6351e70c6bb101d824f41a2b7029f16708cd4c8b7a894453f82e79523765de14c82106f74a146c8f76cf20caeb35475e881be1c74a1dc0783b0ff9a40060e362ec3bb5e3dc3919914787893b0dc80123f44a44744f107268eb85437bf3116efa5bb03d6903ebd987291e8574344cadffa7f960789a3ef6c933305e6a80319c9cd9a49d208c4d4070f47c44edea53476b7779cec179af985f7c8b4b91efb56e0a35d4ecb1ff684a1fd0ee8a2d473e00cd8fe3a752cf7b62fffda4ebe90c992caacbee70c7d912d360e5dd24074430cb2b694ff2fcca6eb77d10b1b22a26841af36501d9a066e280d25ca215378e7635fda9ce865ca6af9ae962a3b6039dbba483a5ab7bee69d891c9809483744a0b0ab94498d1ada452e3a90a19decee6bf2b97de274413f78bd896fc2634d3e26d4bde265143deebf580693aa1925aea6f6ce003f195a226b04377e662e0d87b4a09299061f13c4b0ad2d4281eac386c03f533b1d2a9fb466814817bf27aa737cdeda708b1db19f550b2bdc8360a6e4a7ded415d5ef826f67a8c3623c01751af885a428c2b504f12d04d8c673b1ec69a8a6f001951e442cecd11aae4fbc77a5c18f065574d4a28ee1bc5a987903b00dc61e6760695c627437bc7bed48e4fa16eccea6fa878e74dbb010fc52af27f36b6e81e70444ce0f4a83f5aeca326d5a842adba562a0d39410f4f700934b1881b2bebac2215261422b8f474673ef583e5431b183199faa764e1e340f873a104b6d4a0c39ab970e2d77e5f8e7335ea3c68e87a85fd45113eb53acfbc8feb3955f971df7cadafb2c4c9cb789c1de9468329915afe521681af9007e1388d5cca660d9b13325ac31242e0403c1d82d871d2efc0706d58532c4609502a807ebd95e64
653e3a631f469c01c89cd70247b11bbb61eb15347023b8280ab44d4ca21d455a913889a541325dec2ef257e6cd3bb3d7830ff465240d132aa6ee0b9146682d86c093b5f1f40ce5368f43198968d85070609a178797300e57291ea0c967e2dbe874136406b58f163e68be4325db28b3c684c020be278a7d126efd215c1fb84350864f18926d9f394b109a308468ead25bf2475e79843bbd7f323219ecb2ab658da2d9ded21953f25383a9952fe2e47c3ed3f11c61b85e388c553a59d896a2eceaaf2d0e826bb77b1bb3e0f8ddbb3e04ec0f811063dd3775423d71f8632a3af2cda84d789afe92d710fd35305abcf1f2dd608ef3319eb4e2b21e94352d06836d83caaf8088ce9bbf78b4c16a962581e8766f4c08bdfbc9920f3ab47fe372816a4e8d0f7d78a622ff16af7d71651e4abb8cc0dd920b4e089df5399b2e1a7d06dbc75870ca1498a045c35bde9938361005cca7ba2bb8573e365406f7e12ba2de2d060a6a794fcc22c4f8289f772c309e3a79df55ca3869b010754e89d596de5aa70c779ec8ecf15788827b565c0abb43bb38c036ce987638a6654294efcbaf7b772fbbd9b00b62f4a898854a67a55006ece2fa37dd2ed18d29fc98343a5f166b2df1c5f1caec075987857286a6b782f529ea0fac91c5d5988813bc5c39201bcc236d51932a1545d30b147d743ce399b2e0c4e3a44b4888b16aff1e4c347ea6caee511424a14fe8bb0d6e8e0eb31be05de81b739f6f2646d0a6bf0dfc1859121402b1cca3b052671c5074796b0a87404b07518ad6b423bde12366e110d842dce8639778163f2f8c895abe32a2320593b4e4c51ed94a325d23c7cc02e46a3bed4b1bc322a6924e14705a4f1d5abf3a7f8853270edf58e0aeb7fd124550729570658752f3e9872e43abeddc8dd226761030a26b25203fd5b053dfebbea0f93835df44b2fcd5ce0a2463df58c88f7bf1798*ec90c1ff54632e7c8cfb812eeb14d7ec49ddaf576dca10bfb16f965e6106ce48", "btcr-test-password"}, // Electrum 2.8.0+ encrypted wallet with truncated hash, "electrum28-wallet" from btcrecover project 
{"$electrum$5*0328e536dd1fbbb85d78de1a8c21215d4646cd87d6b6545afcfb203e5bb32e0de4*61b1e287a5acff4b40e4abd73ff62dc233c1c7a6a54b3270949281b9d44bc6e746743733360500718826e50bb28ea99a6378dc0b0c578e9d0bf09c667671c82a1bd71c8121edbb4c9cbca93ab0e17e218558ead81755e62b0d4ad547aa1b3beb0b9ee43b11270261c9b38502f00e7f6f096811b7fdae6f3dce85c278d3751fec044027054218ccf20d404bab24380b303f094704e626348a218f44ab88ce2ac5fa7d450069fca3bb53f9359dbbaad0ea1b3859129b19c93ed7888130f8a534f84a629c67edc150a1c5882a83cb0add4615bb569e8dc471de4d38fc8b1e0b9b28040b5ea86093fcdeceaedb6b8f073f6f0ee5541f473a4b1c2bfae4fc91e4bbb40fa2185ecfa4c72010bcf8df05b1a7db45f64307dbc439f8389f0e368e38960b6d61ac88c07ce95a4b03d6d8b13f4c7dc7d7c447097865235ab621aeef38dc4172bf2dc52e701132480127be375fe98834f16d9895dce7f6cdfe900a2ce57eaa6c3036c1b9a661c3c9adbf84f4adfe6d4d9fa9f829f2957cfb353917dc77fd8dd4872b7d90cb71b7d3a29c9bfe3440e02449220acba410fa0af030f51aa2438f7478dbb277d62613112e4eebc66d5d7bdba793fb2073d449954f563284819189ffb5dbcdeb6c95c64bc24e0ef986bce07bafe96ab449ae2b6edaf4f98ffbd392a57bd93c2359444ec4046ae65b440adb96b6e4eef9d06bb04d2f3fa2e4175165bcadbf7e13cc3b6e65e67df901f96a2f154bc763b56b3736a335e1d1bc16e99736f757a4ae56c099645c917360b1ecf8dcefc7281541c6ff65d87cadab4a48f1f6b7b73a3e5a67e2e032abb56b499e73a9f3b69ce065e43b0174639785ae30635d105ebcc827dcf9b19bdd1a92879a5d4bc4e12b5630c188b1b96e3c586e19901b8f96084bcd59b2f4b201a3a8b6e633a5c194901d4609add9671b0bcc12b2b94ae873d201258b36315484e4b9c5f5d6289656baa93eec9e92aec88e2d73d86b9e3d1f24294e3d8ebe9a9f2f6edfbf28f530670c5b086fc4f74df89b4e4cbe06ee7e45cbd238b599d19c2d5da5523b12b1e7050ea0a9b47a5d22c6c3fc476f814f9705dc7ed3aeb1b44fc6b4d69f02a74963dce5057c3c049f92e595a4da5035cffc303a4cb162803aa3f816527a7e466b8424789a0d77e26819615662420c370457e29fcc1938fd754f3acfd21416ce3ab27e9febbc0e24fc7055eddc31e48faa014f9f3695c2e956f0e6c94c507a8d2f8c3aeb4b98b69b6340b6a3acb1acdde9581279f78ee10687616360c018e9f67d6c8bb5950e8fdabd3d0d5808824975aa4a50f88581472212f24ad58a700fe4787642b973924575fe71d1ecd7b2b6a
cd363f48c40bdd55f35f60a06dee544c266e608fd5a6d263f745e8b11d1160638eb301adfd1a88eddf6d0ccb9e1021e0bde9cf5163583a202b3dc95c255c8cc24*ec90c1ff54632e7c8cfb812eeb14d7ec49ddaf576dca10bfb16f965e6106ce48", "btcr-test-password"}, {NULL} }; static struct custom_salt { uint32_t type; unsigned char salt[8]; // fake salt uint32_t saltlen; unsigned char ephemeral_pubkey[128]; unsigned char data[16384]; // is 16 KiB enough? uint32_t datalen; unsigned char mac[32]; secp256k1_pubkey pubkey; } *cur_salt; typedef struct { // for plaintext, we must make sure it is a full uint64_t width. uint64_t v[(PLAINTEXT_LENGTH + 7) / 8]; // v must be kept aligned(8) uint64_t length; // keep 64 bit aligned, length is overkill, but easiest way to stay aligned. } pass_t; typedef struct { uint64_t hash[8]; } crack_t; typedef struct { // for salt, we append \x00\x00\x00\x01\x80 and must make sure it is a full uint64 width uint64_t salt[(PBKDF2_64_MAX_SALT_SIZE + 1 + 4 + 7) / 8]; // salt must be kept aligned(8) uint32_t length; uint32_t rounds; } salt_t; typedef struct { uint64_t ipad[8]; uint64_t opad[8]; uint64_t hash[8]; uint64_t W[8]; cl_uint rounds; } state_t; static pass_t *host_pass; /** plain ciphertexts **/ static salt_t *host_salt; /** salt **/ static crack_t *host_crack; /** cracked or no **/ static cl_mem mem_in, mem_out, mem_salt, mem_state; static cl_kernel split_kernel; static cl_int cl_error; static struct fmt_main *self; static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)]; #define STEP 0 #define SEED 256 static const char *warn[] = { "xfer: ", ", init: " , ", crypt: ", ", res xfer: " }; static int split_events[] = { 2, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl_autotune.h" #include "memdbg.h" /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { size_t min_lws = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); return MIN(min_lws, autotune_get_task_max_work_group_size(FALSE, 0, split_kernel)); } static void create_clobj(size_t kpc, struct fmt_main *self) { host_pass = mem_calloc(kpc, sizeof(pass_t)); host_crack = mem_calloc(kpc, sizeof(crack_t)); host_salt = mem_calloc(1, sizeof(salt_t)); crypt_out = mem_calloc(kpc, sizeof(*crypt_out)); #define CL_RO CL_MEM_READ_ONLY #define CL_WO CL_MEM_WRITE_ONLY #define CL_RW CL_MEM_READ_WRITE #define CLCREATEBUFFER(_flags, _size, _string) \ clCreateBuffer(context[gpu_id], _flags, _size, NULL, &cl_error); \ HANDLE_CLERROR(cl_error, _string); #define CLKERNELARG(kernel, id, arg, msg) \ HANDLE_CLERROR(clSetKernelArg(kernel, id, sizeof(arg), &arg), msg); mem_in = CLCREATEBUFFER(CL_RO, kpc * sizeof(pass_t), "Cannot allocate mem in"); mem_salt = CLCREATEBUFFER(CL_RO, sizeof(salt_t), "Cannot allocate mem salt"); mem_out = CLCREATEBUFFER(CL_WO, kpc * sizeof(crack_t), "Cannot allocate mem out"); mem_state = CLCREATEBUFFER(CL_RW, kpc * sizeof(state_t), "Cannot allocate mem state"); CLKERNELARG(crypt_kernel, 0, mem_in, "Error while setting mem_in"); CLKERNELARG(crypt_kernel, 1, mem_salt, "Error while setting mem_salt"); CLKERNELARG(crypt_kernel, 2, mem_state, "Error while setting mem_state"); CLKERNELARG(split_kernel, 0, mem_state, "Error while setting mem_state"); CLKERNELARG(split_kernel, 1, mem_out, "Error while setting mem_out"); } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[128]; snprintf(build_opts, sizeof(build_opts), "-DHASH_LOOPS=%u -DPLAINTEXT_LENGTH=%d -DPBKDF2_64_MAX_SALT_SIZE=%d", HASH_LOOPS, PLAINTEXT_LENGTH, PBKDF2_64_MAX_SALT_SIZE); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha512_kernel.cl", gpu_id, 
build_opts); crypt_kernel = clCreateKernel(program[gpu_id], KERNEL_NAME, &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); split_kernel = clCreateKernel(program[gpu_id], SPLIT_KERNEL_NAME, &cl_error); HANDLE_CLERROR(cl_error, "Error creating split kernel"); //Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, sizeof(state_t), 0, db); //Auto tune execution from shared/included code. autotune_run(self, ITERATIONS, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL)); } } static void release_clobj(void) { if (host_pass) { MEM_FREE(host_pass); MEM_FREE(host_salt); MEM_FREE(host_crack); HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state"); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int value, extra; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "*")) == NULL) // type goto err; if (!isdec(p)) goto err; value = atoi(p); if (value != 4 && value != 5) goto err; if ((p = strtokm(NULL, "*")) == NULL) // ephemeral_pubkey goto err; if (hexlenl(p, &extra) > 128 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // data goto err; if (hexlenl(p, &extra) > 16384 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // data goto err; if (hexlenl(p, &extra) > 32 * 2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char 
*ciphertext) { static struct custom_salt cs; secp256k1_context *ctx; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i, length; memset(&cs, 0, SALT_SIZE); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); length = strlen(p) / 2; for (i = 0; i < length; i++) cs.ephemeral_pubkey[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); cs.datalen = strlen(p) / 2; for (i = 0; i < cs.datalen; i++) cs.data[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.mac[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE); secp256k1_ec_pubkey_parse(ctx, &cs.pubkey, cs.ephemeral_pubkey, length); secp256k1_context_destroy(ctx); // we append the count and EOM here, one time. memcpy(cs.salt, "\x0\x0\x0\x1\x80", 5); cs.saltlen = 5; // we include the x80 byte in our saltlen, but the .cl kernel knows to reduce saltlen by 1 MEM_FREE(keeptr); return &cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt*)salt; memcpy(host_salt->salt, cur_salt->salt, cur_salt->saltlen); host_salt->length = cur_salt->saltlen; host_salt->rounds = 1024; // fixed HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(salt_t), host_salt, 0, NULL, NULL), "Copy salt to gpu"); } void *electrum_get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static const char *group_order = "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"; // The decypted and decompressed wallet should start with one of these two, // Christopher Gurnee #define EXPECTED_BYTES_1 
"{\n \"" #define EXPECTED_BYTES_2 "{\r\n \"" static int crypt_all(int *pcount, struct db_salt *salt) { int i; const int count = *pcount; int index; int loops = (host_salt->rounds + HASH_LOOPS - 1) / HASH_LOOPS; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); // Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, global_work_size * sizeof(pass_t), host_pass, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); // Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); for (i = 0; i < (ocl_autotune_running ? 1 : loops); i++) { BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], split_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run split kernel"); BENCH_CLERROR(clFinish(queue[gpu_id]), "clFinish"); opencl_process_event(); } // Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, global_work_size * sizeof(crack_t), host_crack, 0, NULL, multi_profilingEvent[3]), "Copy result back"); if (!ocl_autotune_running) { #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { BIGNUM *p, *q, *r; BN_CTX *ctx; uint64_t u[8]; unsigned char static_privkey[64]; unsigned char shared_pubkey[33]; unsigned char keys[128]; unsigned char cmac[32]; secp256k1_context *sctx; SHA512_CTX md_ctx; int shared_pubkeylen= 33; int j; memcpy(u, host_crack[index].hash, 64); for (j = 0; j < 8; j++) u[j] = JOHNSWAP64(u[j]); memcpy(static_privkey, u, 64); // do static_privkey % GROUP_ORDER p = BN_bin2bn(static_privkey, 64, NULL); q = BN_new(); r = BN_new(); BN_hex2bn(&q, group_order); ctx = BN_CTX_new(); BN_mod(r, p, q, ctx); BN_CTX_free(ctx); BN_free(p); BN_free(q); BN_bn2bin(r, static_privkey); BN_free(r); sctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE); // multiply point with a 
scaler, shared_pubkey is compressed representation secp256k1_mul(sctx, shared_pubkey, &cur_salt->pubkey, static_privkey); secp256k1_context_destroy(sctx); SHA512_Init(&md_ctx); SHA512_Update(&md_ctx, shared_pubkey, shared_pubkeylen); SHA512_Final(keys, &md_ctx); if (cur_salt->type == 4) { // calculate mac of data hmac_sha256(keys + 32, 32, cur_salt->data, cur_salt->datalen, cmac, 32); memcpy(crypt_out[index], cmac, BINARY_SIZE); } else if (cur_salt->type == 5) { z_stream z; unsigned char iv[16]; unsigned char out[512] = { 0 }; unsigned char fout[512] = { 0 }; AES_KEY aes_decrypt_key; // common zlib settings z.zalloc = Z_NULL; z.zfree = Z_NULL; z.opaque = Z_NULL; z.avail_in = 512; z.avail_out = 512; z.next_out = fout; memcpy(iv, keys, 16); memset(crypt_out[index], 0, BINARY_SIZE); // fast zlib based rejection test, is this totally safe? AES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key); AES_cbc_encrypt(cur_salt->data, out, 16, &aes_decrypt_key, iv, AES_DECRYPT); if ((memcmp(out, "\x78\x9c", 2) != 0) || (out[2] & 0x7) != 0x5) { } else { AES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key); AES_cbc_encrypt(cur_salt->data + 16, out + 16, 512 - 16, &aes_decrypt_key, iv, AES_DECRYPT); z.next_in = out; inflateInit2(&z, 15); inflate(&z, Z_NO_FLUSH); inflateEnd(&z); if ((memcmp(fout, EXPECTED_BYTES_1, 7) == 0) || (memcmp(fout, EXPECTED_BYTES_2, 8) == 0)) memcpy(crypt_out[index], cur_salt->mac, BINARY_SIZE); // dirty hack! 
} } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { int saved_len = MIN(strlen(key), PLAINTEXT_LENGTH); // make sure LAST uint64 that has any key in it gets null, since we simply // ^= the whole uint64 with the ipad/opad mask strncpy((char*)host_pass[index].v, key, PLAINTEXT_LENGTH); host_pass[index].length = saved_len; } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; memcpy(ret, host_pass[index].v, PLAINTEXT_LENGTH); ret[host_pass[index].length] = 0; return ret; } struct fmt_main fmt_opencl_electrum_modern = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, electrum_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, electrum_get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */ #endif /* HAVE_LIBZ */
GB_unaryop__lnot_int8_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_int8_bool
// op(A') function: GB_tran__lnot_int8_bool

// C type:   int8_t
// A type:   bool
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (already boolean) input
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type (bool) to the C type (int8_t)
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij)): typecast first, then apply the operator
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;               \
    GB_OP (GB_CX (pC), x) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) to each of the anz entries of Ax, writing the
// int8_t result into Cx.  Each iteration is independent, so the loop is a
// straightforward statically scheduled OpenMP parallel for.
GrB_Info GB_unop__lnot_int8_bool
(
    int8_t *restrict Cx,        // output array, size anz
    const bool *restrict Ax,    // input array, size anz
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out; caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is generated by including the shared template
// GB_unaryop_transpose.c, which expands in terms of the GB_* macros above.
GrB_Info GB_tran__lnot_int8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__sqrt_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__sqrt_fp64_fp64)
// op(A') function: GB (_unop_tran__sqrt_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = sqrt (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = sqrt (x) ;

// casting (identity cast: A and C are both double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    double aij = Ax [pA] ;              \
    /* Cx [pC] = op (cast (aij)) */     \
    double z = aij ;                    \
    Cx [pC] = sqrt (z) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SQRT || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = sqrt (aij) to the anz entries of Ax.  When Ab is non-NULL
// (A is bitmap), positions with Ab [p] == 0 hold no entry and are skipped.
GrB_Info GB (_unop_apply__sqrt_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index is declared outside the for statement so the OpenMP pragma
    // works on compilers that require a pre-declared parallel-for index
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: every one of the anz positions is a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = sqrt (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = sqrt (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel comes from the shared template GB_unop_transpose.c,
// expanded with the GB_* macros defined above.
GrB_Info GB (_unop_tran__sqrt_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
error-correct.c
/**
 * Coral: short reads error correction with multiple alignments
 * Copyright (C) 2011 Leena Salmela <leena.salmela@cs.helsinki.fi>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>

#include "reverse.h"
#include "multi-align.h"
#include "q-gram-index-ht.h"

/* Print the command line help text to stdout. */
void usage(char *prog_name) {
  printf("Usage: %s -f[q,s] <input file> -o <output file> [options]\n", prog_name);
  printf("Required parameters:\n");
  printf("-f or -fq or -fs <file> Use -f for a fasta file, -fq for standard fastq file\n");
  printf(" and -fs for Solexa fasta file\n");
  printf("-o <file> Output file for corrected reads. Format is fasta if\n");
  printf(" the input file is fasta and fastq if the input file\n");
  printf(" is fastq. The output file cannot be the same file as\n");
  printf(" the input file.\n\n");
  /* printf("-n <int> The number of reads in the input file.\n\n");*/
  printf("Quick options for sequencing technologies:\n");
  printf("-454 Equal to: -mr %d -mm %d -g %d\n", MATCH_REWARD_454, MM_PENALTY_454, GAP_PENALTY_454);
  printf("-illumina Equal to: -mr %d -mm %d -g %d\n\n", MATCH_REWARD_ILLUMINA, MM_PENALTY_ILLUMINA, GAP_PENALTY_ILLUMINA);
  printf("Other options:\n");
  printf("-k <int> The length of a k-mer for indexing the reads. [%d]\n", DEFAULT_Q);
  /* printf("-m <int> Maximum number of different k-mers indexed (for memory\n"); */
  /* printf(" allocation) [%d]\n", MAX_NUM_QGRAMS); */
  printf("-e <float> Maximum allowed error rate of a read in the multiple\n");
  printf(" alignment. [%.2f]\n", MAX_ERROR_RATE);
  printf("-t <float> The minimum proportion of agreeing reads to consider\n");
  printf(" the consensus of a multiple alignment trustworthy.\n");
  printf(" [%.2f]\n", THRESHOLD);
  printf("-q <float> A threshold for proportion of quality values for bases\n");
  printf(" agreeing with the consensus to consider the consensus\n");
  printf(" trustworthy. Not used if input is fasta. [%.2f]\n", QUALITY_THRESHOLD);
  printf("-cq [<int>] If the integer parameter is not present, old quality\n");
  printf(" scores are retained for corrected mismatches. If the\n");
  printf(" integer parameter is present, the quality scores of\n");
  printf(" all corrected bases are set to that value. By default\n");
  printf(" quality scores of corrected bases are computed as\n");
  printf(" explained in the paper.\n");
  printf("-a <int> Maximum number of reads to compute multiple alignments\n");
  printf(" for. [%d]\n", MAX_ALIGNED_READS);
  printf("-p <int> Number of threads to use. [%d]\n", NUM_THREADS);
  printf("-r <int> The number of times the k-mer index is built during a\n");
  printf(" correction run.\n");
  printf("-i <int> Index only the first <int> reads. By default all\n");
  printf(" reads are indexed.\n");
  printf("-j <int> Do not correct the first <int> reads. By default all\n");
  printf(" reads are corrected.\n");
  printf("-s <file> Write statistics of computed alignments to a file. By\n");
  printf(" default statistics are not written.\n");
  printf("-c <file> Write the consensus sequences of computed alignments\n");
  printf(" to a file. By default the consensus sequences are not\n");
  printf(" written.\n");
  printf("-mr <int> Reward for matching bases when computing alignments.\n");
  printf(" [%d]\n", MATCH_REWARD);
  printf("-mm <int> Penalty for mismatches when computing alignments. [%d]\n", MM_PENALTY);
  printf("-g <int> Gap penalty when computing alignments. If gap penalty\n");
  printf(" is higher than 100 times mismatch penalty, it is\n");
  printf(" assumed that no gaps will occur. [%d]\n", GAP_PENALTY);
}

#define BUF_SIZE 65536
#define MAX_DUPL_READS 10000

/*
 * Correct sequencing errors in `reads` (and their quality strings `qual`,
 * when non-NULL) IN PLACE, using multiple alignments of reads that share a
 * q-gram (k-mer).  For each base read, candidate reads sharing a q-gram are
 * gathered from the index, a multiple alignment is computed, and bases that
 * disagree with a sufficiently supported consensus column are rewritten.
 * The per-read edit budget `num_edits` caps cumulative changes per read.
 * Work is divided over `p` OpenMP threads; the q-gram index is rebuilt
 * `rebuilds` times over the run.  Optional side outputs: per-alignment
 * statistics (`stat_file`) and consensus sequences (`consensus_file`).
 */
void correct_errors(uchar **reads, int *read_length, char **qual, int64_t num_reads,
                    int q, int max_grams, int p, uchar *num_edits,
                    double max_error_rate, double threshold, double quality_threshold,
                    int max_aligned_reads, int match_reward, int mm_penalty,
                    int gap_penalty, char *stat_file, char *consensus_file,
                    int corrected_quality_scheme, int corrected_quality,
                    int rebuilds, int64_t indexed_reads, int64_t min_correct_id) {
  char *line, *line2, *lineq, *lineq2;
  int64_t i, j, k;
  int64_t ii, jj;
  FILE *f;
  FILE *sf;
  int tid;              /* Thread id of the current thread */
  int dist;             /* Edit distance between the original and corrected read */
  read_pos *align_pos;  /* Arguments to alignment calculation */
  char *pos;
  char *consensus;      /* Consensus sequence returned by alignment calculation */
  int clen;             /* Length of consensus */
  align *alignment;     /* Data structures for alignment calculation */
  double quality;       /* Quality of an alignment */
  int share;            /* Number of reads that share a k-mer with the base read */
  int64_t num_good = 0;    /* Number of alignments that were used for correction */
  int64_t num_bad = 0;     /* Number of alignments that were not good enough for correction */
  int64_t num_perfect = 0; /* Number of alignments with no disagreeing columns */
  char *used_reads;     /* For recording which reads have been used to build a consensus */
  int bits;
  uint64_t mask;        /* Bit mask for masking the succinct representations */
  uint64_t gram;         /* Succinct representation of a q-gram */
  uint64_t gram_reverse; /* Succinct representation of the reverse of a q-gram */
  uint64_t current;
  int lastN;
  index_t *index;
  int iter;
  char *aligned_reads;
  char *corrected_reads;
  int part;

  for(i = 0; i < num_reads; i++) {
    num_edits[i] = 0;
  }

  /* Per-read flags: was the read ever aligned / part of a good alignment.
     NOTE(review): these (and used_reads/line below) are never freed before
     returning — presumably tolerated because the process exits soon after;
     verify against main(). */
  aligned_reads = (char *)malloc(num_reads*sizeof(char));
  corrected_reads = (char *)malloc(num_reads*sizeof(char));
  if (aligned_reads == NULL || corrected_reads == NULL) {
    printf("Malloc failed\n");
    exit(1);
  }
  for(i = 0; i < num_reads; i++) {
    aligned_reads[i] = 0;
    corrected_reads[i] = 0;
  }

  if (consensus_file != NULL) {
    f = fopen(consensus_file, "w");
    if (!f) {
      printf("Could not open consensus file %s\n", consensus_file);
      exit(1);
    }
    used_reads = (char *)malloc(num_reads*sizeof(char));
    if (used_reads == NULL) {
      printf("Malloc failed\n");
      exit(1);
    }
    for(i = 0; i < num_reads; i++)
      used_reads[i] = 0;
  }

  if (stat_file != NULL) {
    sf = fopen(stat_file, "w");
    /* NOTE(review): "fille" typo below is in a user-visible runtime string;
       left byte-identical here. */
    if (!sf) {
      printf("Could not open statistics fille %s\n", stat_file);
      exit(1);
    }
  }

  omp_set_num_threads(p);

  for(iter = 0; iter < rebuilds; iter++) {
    /* Rebuild the q-gram index over the (possibly already corrected) reads */
    if (iter > 0)
      free_index(index);
    i = build_index(reads, indexed_reads, q, (void **)&index, (uchar *)PREFIX,
                    max_grams, 1/*(int)(1.0/(1.0-threshold))*/, max_aligned_reads);
    if (i != 0) {
      printf("Building q-gram index failed: %s\n", error_index(i));
      exit(1);
    }

    printf("Q-gram index built.\n");
    printf("Correcting reads\n");

#pragma omp parallel private(line, line2, lineq, lineq2, i, j, k, align_pos, pos, consensus, clen, ii, jj, dist, alignment, tid, quality, gram, gram_reverse, lastN, current, bits, mask, share, part)
    {
      bits = 2;  /* two bits per base in the succinct q-gram encoding */
      mask = ((uint64_t)1 << (2*index->q))-1;

#pragma omp critical
      {
        /* Is malloc thread safe? */
        align_pos = (read_pos *)malloc(MAX_DUPL_READS*sizeof(read_pos));
        alignment = (align *)malloc(sizeof(align));
        line = (char *)malloc(BUF_SIZE*sizeof(char));
        line2 = (char *)malloc(BUF_SIZE*sizeof(char));
        if (qual != NULL) {
          lineq = (char *)malloc(BUF_SIZE*sizeof(char));
          lineq2 = (char *)malloc(BUF_SIZE*sizeof(char));
        } else {
          lineq = NULL;
          lineq2 = NULL;
        }
        for(i = 0; i < MAX_DUPL_READS; i++) {
          align_pos[i].edited_read = (char *)malloc(MAX_READ_LENGTH*sizeof(char));
          if (qual != NULL) {
            align_pos[i].edited_qual = (char *)malloc(MAX_READ_LENGTH*sizeof(char));
          }
        }
        /* Initialize the borders of the DP matrix used by multi_align() */
        for(i = 0; i < MAX_CONTIG_LEN+10; i++) {
          alignment->dp[i][0] = 0;
          alignment->dp_trace[i][0] = 'J';
        }
        for(j = 0; j < MAX_CONTIG_LEN+10; j++) {
          alignment->dp[0][j] = 0;
          alignment->dp_trace[0][j] = 'I';
        }
      }

      tid = omp_get_thread_num();
      p = omp_get_num_threads();
      printf("Thread %d/%d starting\n", tid, p);

      /* How to split the work between the threads:
       * - Threads should not work at the same time with ids close to
       *   each other because if so the likelihood of two threads
       *   working on the same read will be higher.
       */
      for(i = iter*(int64_t)num_reads/rebuilds+tid*10000; i < (iter+1)*(int64_t)num_reads/rebuilds; i++) {
        if (i >= min_correct_id && num_edits[i] <= max_error_rate*strlen((char *)reads[i])) {
          gram = 0;
          gram_reverse = 0;
          lastN = -1; /* Initialize so that we will index the first q-gram only after reading q chars */
          /* Use 50 bp windows of the read as base sequences */
          for (part = 0; part < (signed int)strlen((char *)reads[i]); part += 50) {
            jj = 0;  /* number of candidate reads gathered for this part */
            for(j = part; j < part+50 && j < (signed int)strlen((char *)reads[i]); j++) {
              if (index->reads[i][j] == 'N' || index->reads[i][j] == 'n') {
                lastN = j;
              }
              /* Roll the 2-bit encodings of the q-gram and its reverse complement */
              switch(index->reads[i][j]) {
              case 'A':
              case 'a':
                gram = gram << bits | CODE_A;
                gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_T << ((index->q-1)*bits));
                break;
              case 'C':
              case 'c':
                gram = gram << bits | CODE_C;
                gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_G << ((index->q-1)*bits));
                break;
              case 'G':
              case 'g':
                gram = gram << bits | CODE_G;
                gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_C << ((index->q-1)*bits));
                break;
              case 'T':
              case 't':
                gram = gram << bits | CODE_T;
                gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_A << ((index->q-1)*bits));
                break;
              default:
                gram = gram << bits;
                gram_reverse = gram_reverse >> bits;
                break;
              }
              gram = gram & mask;
              gram_reverse = gram_reverse & mask;
              /* Skip windows containing an N and palindromic q-grams */
              if (lastN > j-index->q || gram_reverse == gram)
                continue;
              /* Canonical representation: the smaller of the two encodings */
              if (gram_reverse < gram) {
                current = gram_reverse;
              } else {
                current = gram;
              }
              /* decipher the q-gram (forward and reverse representations, line and line2) */
              for(k = 0; k < index->q; k++) {
                switch((gram >> 2*(index->q-k-1)) & 0x03) {
                case 0:
                  line[k] = 'A';
                  line2[index->q-k-1] = 'T';
                  break;
                case 1:
                  line[k] = 'C';
                  line2[index->q-k-1] = 'G';
                  break;
                case 2:
                  line[k] = 'G';
                  line2[index->q-k-1] = 'C';
                  break;
                case 3:
                  line[k] = 'T';
                  line2[index->q-k-1] = 'A';
                  break;
                }
              }
              line[index->q] = '\0';
              line2[index->q] = '\0';
#pragma omp critical
              {
                /* hash table lookup is serialized; 0 means "not indexed" */
                k = (*index->gram2id)[current]-1;
              }
              if (k < 0)
                continue;
              if (i >= indexed_reads && jj < MAX_DUPL_READS) {
                /* This read is not in the index so we add it here. */
                align_pos[jj].read = i;
                /* q-gram is always in forward orientation (however, because
                   of using parts of the read as base read, the read might
                   already be corrected and then we cannot find the q-gram
                   anymore) */
                pos = strstr((char *)reads[i], line);
                if (pos != NULL) {
                  align_pos[jj].ori = 'U';
                  align_pos[jj].pos = (j-index->q+1)- ((long long int)pos -(long long int)reads[i]);
                  align_pos[jj].occ = 1;
                  pos = strstr(pos+1, line);
                  while(pos != NULL) {
                    align_pos[jj].occ++;
                    pos = strstr(pos+1, line);
                  }
                  jj++;
                }
              }
              /* initiliaze input for alignment calculation */
              for(ii = 0; ii < (signed int)index->counts[k] && jj < MAX_DUPL_READS; ii++) {
                /* Skip reads that have already used up their edit budget */
                if (num_edits[index->gram_reads[k][ii]] > max_error_rate*strlen((char *)reads[index->gram_reads[k][ii]]))
                  continue;
                align_pos[jj].read = index->gram_reads[k][ii];
                /* Search for the q-gram in forward orientation */
                pos = strstr((char *)reads[index->gram_reads[k][ii]], line);
                if (pos != NULL) {
                  align_pos[jj].ori = 'U';
                  align_pos[jj].pos = (j-index->q+1)- ((long long int)pos -(long long int)reads[index->gram_reads[k][ii]]);
                  align_pos[jj].occ = 1;
                  /* Collapse duplicate postings of the same read */
                  while(ii + 1 < (signed int)index->counts[k] &&
                        index->gram_reads[k][ii+1] == index->gram_reads[k][ii]) {
                    ii++;
                    align_pos[jj].occ++;
                  }
                  jj++;
                } else {
                  /* Forward q-gram not found, search for the reverse complement */
                  pos = strstr((char *)reads[index->gram_reads[k][ii]], line2);
                  if (pos != NULL) {
                    align_pos[jj].ori = 'C';
                    align_pos[jj].pos = (j-index->q+1) - (strlen((const char *)reads[index->gram_reads[k][ii]]) - ((int64_t)pos - (int64_t)reads[index->gram_reads[k][ii]]) - index->q);
                    align_pos[jj].occ = 1;
                    while(ii + 1 < (signed int)index->counts[k] &&
                          index->gram_reads[k][ii+1] == index->gram_reads[k][ii]) {
                      ii++;
                      align_pos[jj].occ++;
                    }
                    jj++;
                  }
                }
              }
            }
            j = jj;  /* j now holds the number of gathered candidate reads */
            if (j >= 1.0/(1.0-threshold) && j < MAX_DUPL_READS) {
#pragma omp critical
              {
                for(ii = 0; ii < j; ii++) {
                  aligned_reads[align_pos[ii].read] = 1;
                }
              }
              /* Compute multiple alignment */
              j = multi_align(alignment, (char **)reads, qual, align_pos, j,
                              max_error_rate, max_aligned_reads, match_reward,
                              mm_penalty, gap_penalty);
              /* Locate the base read among the aligned reads */
              for(k = 0; k < j; k++)
                if (align_pos[k].read == (ulong) i)
                  break;
              clen = get_consensus(alignment, &consensus);
              consensus[clen] = '\0';
              if (k >= j) {
                quality = 0.0;
                share = 0;
              } else {
                share = kalign_share(alignment, align_pos, j, index->q, k, (char **)reads);
              }
              if (share != j && share > 0) {
                /* Recompute multiple alignment */
                j = multi_align(alignment, (char **)reads, qual, align_pos, share,
                                max_error_rate, max_aligned_reads, match_reward,
                                mm_penalty, gap_penalty);
                for(k = 0; k < j; k++)
                  if (align_pos[k].read == (ulong) i)
                    break;
                clen = get_consensus(alignment, &consensus);
                consensus[clen] = '\0';
                if (k >= j) {
                  quality = 0.0;
                  share = 0;
                } else {
                  share = kalign_share(alignment, align_pos, j, index->q, k, (char **)reads);
                }
              }
              if (share == j) {
                quality = align_quality(alignment, align_pos, j);
              } else {
                quality = 0.0;
              }
#pragma omp critical
              {
                if (stat_file != NULL) {
                  snprintf(line2, BUF_SIZE, "%ld\t%f\t%d\t%ld\n", i, 1.0-quality, share, j);
                  if (!fwrite(line2, sizeof(char), strlen(line2), sf)) {
                    printf("Could not write to statistics file\n");
                  }
                }
                /* update statistics */
                if (quality >= 1.0) {
                  num_perfect++;
                } else if (quality > 1.0-max_error_rate) {
                  num_good++;
                } else {
                  num_bad++;
                }
              }
#ifdef DEBUG
#pragma omp critical
              {
                /* if (quality > 1.0-max_error_rate && quality < 1.0 && share == j) { */
                if (quality < 1.0) {
                  printf("******************\n");
                  printf("%d: %s\n", i, reads[i]);
                  printf(" %s\n", consensus);
                  for(k = 0; k < j; k++) {
                    printf("%c ", align_pos[k].ori);
                    for(ii = 0; ii < align_pos[k].pos; ii++) {
                      printf(" ");
                    }
                    if (align_pos[k].ori == 'C') {
                      reverse(align_pos[k].edited_read, line2);
                      printf("%s\n", line2);
                    } else {
                      printf("%s\n", align_pos[k].edited_read);
                    }
                  }
                  if (qual != NULL) {
                    for(k = 0; k < j; k++) {
                      printf("%c ", align_pos[k].ori);
                      for(ii = 0; ii < align_pos[k].pos; ii++) {
                        printf(" ");
                      }
                      if (align_pos[k].ori == 'C') {
                        for(ii = strlen(align_pos[k].edited_read)-1; ii >= 0; ii--) {
                          printf("%d ", align_pos[k].edited_qual[ii]);
                        }
                        printf("\n");
                      } else {
                        for(ii = 0; ii < (signed)strlen(align_pos[k].edited_read); ii++) {
                          printf("%d ", align_pos[k].edited_qual[ii]);
                        }
                        printf("\n");
                      }
                    }
                  }
                }
                printf("OK: %d, Quality: %f Share: %d/%d\n", alignment->ok, quality, share, j);
              }
#endif
              if (quality > 1.0-max_error_rate && share == j) {
#pragma omp critical
                {
                  for(ii = 0; ii < j; ii++) {
                    corrected_reads[align_pos[ii].read] = 1;
                  }
                }
              }
              if (quality > 1.0-max_error_rate && quality < 1.0 && share == j) {
                /* Alignment has an appropriate quality for correction */
                clen = get_consensus(alignment, &consensus);

                /* Quality values for the consensus */
                int cons_qual[2*MAX_READ_LENGTH+1];
                int tot_qual[2*MAX_READ_LENGTH+1];
                if (qual != NULL) {
                  for(k = 0; k < clen; k++) {
                    cons_qual[k] = 0;
                    tot_qual[k] = 0;
                  }
                  for(k = 0; k < j; k++) {
                    /* Copy to line2 a representation of the edited read in
                       the same orientation as the consensus */
                    if (align_pos[k].ori == 'U') {
                      strcpy(line2, align_pos[k].edited_read);
                      if (qual != NULL)
                        memcpy(lineq2, align_pos[k].edited_qual, strlen(line2));
                    } else {
                      reverse(align_pos[k].edited_read, line2);
                      if (qual != NULL) {
                        for(jj = 0; jj < (signed)strlen(line2); jj++)
                          lineq2[strlen(line2)-jj-1] = align_pos[k].edited_qual[jj];
                        lineq2[strlen(line2)] = '\0';
                      }
                    }
                    for(ii = 0; ii < (signed int)strlen(line2); ii++) {
                      if (consensus[align_pos[k].pos + ii] == line2[ii]) {
                        cons_qual[align_pos[k].pos + ii] += lineq2[ii];
                      }
                      tot_qual[align_pos[k].pos + ii] += lineq2[ii];
                    }
                  }
                }
                for(k = 0; k < j; k++) {
                  if ((signed)align_pos[k].read <= min_correct_id) {
                    continue;
                  }
                  if (align_pos[k].edits > 0) {
                    /* Copy to line2 a representation of the edited read in
                       the same orientation as the consensus */
                    if (align_pos[k].ori == 'U') {
                      strcpy(line2, align_pos[k].edited_read);
                      if (qual != NULL)
                        memcpy(lineq2, align_pos[k].edited_qual, strlen(line2));
                    } else {
                      reverse(align_pos[k].edited_read, line2);
                      if (qual != NULL) {
                        for(jj = 0; jj < (signed)strlen(line2); jj++)
                          lineq2[strlen(line2)-jj-1] = align_pos[k].edited_qual[jj];
                        lineq2[strlen(line2)] = '\0';
                      }
                    }
#ifdef DEBUG
                    printf("Original read %s\n", reads[align_pos[k].read]);
#endif
                    jj = 0;
                    dist = 0;
                    for(ii = 0; ii < (signed int)strlen(line2); ii++) {
                      int consC; /* Number of reads supporting consensus at this position */
                      //int editC; /* Number of reads supporting this read at this position */
                      switch(consensus[align_pos[k].pos+ii]) {
                      case 'A':
                      case 'a':
                        consC = alignment->contigA[align_pos[k].pos+ii];
                        break;
                      case 'C':
                      case 'c':
                        consC = alignment->contigC[align_pos[k].pos+ii];
                        break;
                      case 'G':
                      case 'g':
                        consC = alignment->contigG[align_pos[k].pos+ii];
                        break;
                      case 'T':
                      case 't':
                        consC = alignment->contigT[align_pos[k].pos+ii];
                        break;
                      case 'N':
                      case 'n':
                      case '-':
                        consC = alignment->contigN[align_pos[k].pos+ii];
                        break;
                      default:
                        consC = 0;
                        break;
                      }
                      /* switch(line2[ii]) { */
                      /* case 'A': */
                      /* case 'a': */
                      /*   editC = alignment->contigA[align_pos[k].pos+ii]; */
                      /*   break; */
                      /* case 'C': */
                      /* case 'c': */
                      /*   editC = alignment->contigC[align_pos[k].pos+ii]; */
                      /*   break; */
                      /* case 'G': */
                      /* case 'g': */
                      /*   editC = alignment->contigG[align_pos[k].pos+ii]; */
                      /*   break; */
                      /* case 'T': */
                      /* case 't': */
                      /*   editC = alignment->contigT[align_pos[k].pos+ii]; */
                      /*   break; */
                      /* case 'N': */
                      /* case 'n': */
                      /* case '-': */
                      /*   editC = alignment->contigN[align_pos[k].pos+ii]; */
                      /*   break; */
                      /* default: */
                      /*   editC = 0; */
                      /*   break; */
                      /* } */
                      /* Accept the consensus base only when enough reads
                         (and, for fastq input, enough quality mass) agree */
                      if ((double)consC / (double)(alignment->contigA[align_pos[k].pos+ii]+
                                                   alignment->contigC[align_pos[k].pos+ii]+
                                                   alignment->contigG[align_pos[k].pos+ii]+
                                                   alignment->contigT[align_pos[k].pos+ii]+
                                                   alignment->contigN[align_pos[k].pos+ii]) >= threshold &&
                          (qual == NULL? 1 : (double)cons_qual[align_pos[k].pos+ii]/(double)tot_qual[align_pos[k].pos+ii] >= quality_threshold)
                          /*cons_qual[align_pos[k].pos+ii] - lineq2[ii] > quality_threshold)*/) {
                        if (line2[ii] != consensus[align_pos[k].pos+ii])
                          dist++;
                        if (consensus[align_pos[k].pos+ii] != '-') {
                          line[jj] = consensus[align_pos[k].pos+ii];
                          if (qual != NULL) {
                            if (line2[ii] == consensus[align_pos[k].pos+ii]) {
                              lineq[jj] = lineq2[ii];
                            } else {
                              if (corrected_quality_scheme == FLAT) {
                                lineq[jj] = corrected_quality;
                              } else if (corrected_quality_scheme == ESTIMATE || line2[ii] == '-') {
                                lineq[jj] = cons_qual[align_pos[k].pos+ii]/consC;
                              } else {
                                lineq[jj] = lineq2[ii];
                              }
                            }
                          }
                          jj++;
                        }
                      } else {
                        if (line2[ii] != '-') {
                          line[jj] = line2[ii];
                          if (qual != NULL)
                            lineq[jj] = lineq2[ii];
                          jj++;
                        }
                      }
                    }
                    line[jj] = '\0';
                    if (qual != NULL)
                      lineq[jj] = '\0';
                    // Check that the corrected read fits into the array
                    if ((int)strlen(line) <= read_length[align_pos[k].read] + MAX_INSERTIONS) {
                      /* Copy the original read in the original orientation to the reads array */
#pragma omp critical
                      {
                        if (align_pos[k].ori == 'U') {
                          strcpy((char *)reads[align_pos[k].read], line);
                          if (qual != NULL)
                            memcpy(qual[align_pos[k].read], lineq, strlen(line));
                        } else {
                          reverse(line, (char *)reads[align_pos[k].read]);
                          if (qual != NULL) {
                            for(jj = 0; jj < (signed)strlen(line); jj++) {
                              qual[align_pos[k].read][strlen(line)-jj-1] = lineq[jj];
                            }
                            qual[align_pos[k].read][strlen(line)] = '\0';
                          }
                        }
#ifdef DEBUG
                        printf("Corrected read %s\n", reads[align_pos[k].read]);
#endif
                        /* NOTE(review): writes to reads[]/num_edits[] happen
                           under this critical section, but other threads read
                           them without synchronization — presumably tolerated
                           by design (see the work-splitting comment above);
                           verify. */
                        num_edits[align_pos[k].read] += dist;
                      }
                    }
                  }
                }
              } else {
                j = share;
              }
              if (quality > 1.0-max_error_rate) {
#pragma omp critical
                {
                  /* Write consensus sequence to file */
                  if (consensus_file != NULL) {
                    /* Strip gap characters from the consensus in place */
                    for(ii = 0, jj = 0; jj < (int64_t)strlen(consensus); jj++) {
                      if (consensus[jj] != '-') {
                        consensus[ii] = consensus[jj];
                        ii++;
                      }
                    }
                    consensus[ii] = '\0';
                    snprintf(line, BUF_SIZE, ">consensus_%ld (%f)\n%s\n", i, quality, consensus);
                    if (!fwrite(line, sizeof(char), strlen(line), f)) {
                      printf("Writing to consensus file failed\n");
                    }
                    for(k = 0; k < j; k++) {
                      used_reads[align_pos[k].read] = 1;
                    }
                  }
                }
              }
            }
          }
        }
        /* Print some hint of progress */
        if (i % 10000 == 9999) {
          printf("Thread %d: %ld/%ld Perfect alignments: %ld, Good alignments: %ld, Bad alignments: %ld\n",
                 tid, i, num_reads, num_perfect, num_good, num_bad);
          /* printf("Thread %d: %d/%d\n", tid, i, num_reads); */
          /* Jump over the blocks assigned to the other threads */
          i += (p-1)*10000;
        }
      }

#pragma omp critical
      {
        for(i = 0; i < MAX_DUPL_READS; i++) {
          free(align_pos[i].edited_read);
          if (qual != NULL)
            free(align_pos[i].edited_qual);
        }
        free(align_pos);
        free(alignment);
        free(line);
        free(line2);
        if (qual != NULL) {
          free(lineq);
          free(lineq2);
        }
        printf("Thread %d/%d finishing\n", tid, p);
      }
    } // end omp parallel
  }

  printf("Perfect alignments: %ld, Good alignments: %ld, Bad alignments: %ld\n",
         num_perfect, num_good, num_bad);

  int num_aligned = 0;
  int num_corrected = 0;
  for(i = 0; i < num_reads; i++) {
    if (aligned_reads[i] > 0)
      num_aligned++;
    if (corrected_reads[i] > 0)
      num_corrected++;
  }
  printf("%d reads were aligned, %d reads were aligned in a good alignment\n",
         num_aligned, num_corrected);

  if (stat_file != NULL)
    fclose(sf);

  line = (char *)malloc(BUF_SIZE*sizeof(char));
  if (consensus_file != NULL) {
    /* Append reads that never contributed to any written consensus */
    for(i = 0; i < num_reads; i++) {
      if (!used_reads[i]) {
        snprintf(line, BUF_SIZE, ">unused_read_%ld\n%s\n", i, reads[i]);
        if (!fwrite(line, sizeof(char), strlen(line), f)) {
          printf("Could not write to consensus file\n");
        }
      }
    }
    fclose(f);
  }
}
* Command line options: * -f[q] <input file> * -o <output file> * -n <number of reads> * -k <k-mer length> * -m <maximum number of different k-mers> * -e <max error rate for alignment> * -t <minimum proportion of agreeing reads to consider the consensus trustworthy> * -q <threshold for the sum of quality values of bases agreeing with consensus minus * the quality of the base to correct> * -a <maximum number of reads to compute multiple alignments for> * -p <number of threads to use> * -pr <prefix for considered q-grams> (flag not implemented) * -s <statistics file> * -c <consensus file> * -r <number of rebuilds> */ int main(int argc, char *argv[]) { char *fasta_file = NULL; char *fastq_file = NULL; int64_t num_reads = 0; char *out_file = NULL; char *stat_file = NULL; char *consensus_file = NULL; uchar **reads; char **qual; int *read_length; char *line = (char *)malloc(BUF_SIZE*sizeof(char)); char *line2 = (char *)malloc(BUF_SIZE*sizeof(char)); int64_t i,k,j,jj; int64_t ii; FILE *f; FILE *of; uchar *buf4reads = NULL; char *buf4qual = NULL; int c; int p = NUM_THREADS; /* Number of threads to use */ /* Number of edit operations performed on each read */ uchar *num_edits; int q = DEFAULT_Q; /* The length of a q-gram */ char *prog_name = argv[0]; uint64_t max_grams = MAX_NUM_QGRAMS; double max_error_rate = MAX_ERROR_RATE; double threshold = THRESHOLD; double quality_threshold = QUALITY_THRESHOLD; int max_aligned_reads = MAX_ALIGNED_READS; int match_reward = MATCH_REWARD; int mm_penalty = MM_PENALTY; int gap_penalty = GAP_PENALTY; int corrected_quality_scheme = ESTIMATE; int corrected_quality = 0; int quality_offset = 64; int rebuilds = 1; int64_t indexed_reads = -1; int64_t min_correct_id = -1; while(argc > 0) { if (!strcmp(argv[0], "-f")) { if (argc > 1) { fasta_file = argv[1]; fastq_file = NULL; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-fq")) { if (argc > 1) { fastq_file = argv[1]; fasta_file = NULL; quality_offset = 33; } 
else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-fs")) { if (argc > 1) { fastq_file = argv[1]; fasta_file = NULL; quality_offset = 64; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-o")) { if (argc > 1) { out_file = argv[1]; } else { usage(prog_name); return 1; } argc--; argv++; /* } else if (!strcmp(argv[0], "-n")) { */ /* if (argc > 1) { */ /* num_reads = atoi(argv[1]); */ /* } else { */ /* usage(prog_name); */ /* return 1; */ /* } */ /* argc--; */ /* argv++; */ } else if (!strcmp(argv[0], "-k")) { if (argc > 1) { q = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-p")) { if (argc > 1) { p = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; /* } else if (!strcmp(argv[0], "-m")) { */ /* if (argc > 1) { */ /* max_grams = atol(argv[1]); */ /* } else { */ /* usage(prog_name); */ /* return 1; */ /* } */ /* argc--; */ /* argv++; */ } else if (!strcmp(argv[0], "-e")) { if (argc > 1) { max_error_rate = atof(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-t")) { if (argc > 1) { threshold = atof(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-q")) { if (argc > 1) { quality_threshold = atof(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-cq")) { if (argc > 1 && argv[1][0] != '-') { corrected_quality_scheme = FLAT; corrected_quality = atoi(argv[1]); argc--; argv++; } else { corrected_quality_scheme = KEEP; } } else if (!strcmp(argv[0], "-a")) { if (argc > 1) { max_aligned_reads = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-c")) { if (argc > 1) { consensus_file = argv[1]; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-s")) { if (argc > 1) { stat_file = argv[1]; } else { usage(prog_name); return 1; } 
argc--; argv++; } else if (!strcmp(argv[0], "-mr")) { if (argc > 1) { match_reward = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-mm")) { if (argc > 1) { mm_penalty = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-g")) { if (argc > 1) { gap_penalty = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-r")) { if (argc > 1) { rebuilds = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-i")) { if (argc > 1) { indexed_reads = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-j")) { if (argc > 1) { min_correct_id = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-454")) { match_reward = MATCH_REWARD_454; mm_penalty = MM_PENALTY_454; gap_penalty = GAP_PENALTY_454; } else if (!strcmp(argv[0], "-illumina")) { match_reward = MATCH_REWARD_ILLUMINA; mm_penalty = MM_PENALTY_ILLUMINA; gap_penalty = GAP_PENALTY_ILLUMINA; } argc--; argv++; } if (out_file == NULL/* || num_reads == 0*/) { usage(prog_name); return 1; } if ((fasta_file != NULL && strcmp(out_file, fasta_file) == 0)|| (fastq_file != NULL && strcmp(out_file, fastq_file) == 0)) { printf("The input and output files must not be the same file!\n"); usage(prog_name); return 1; } i = 0; k = BUF_SIZE; if (fasta_file != NULL) { qual = NULL; printf("Counting reads in file %s\n", fasta_file); f = fopen(fasta_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fasta_file); abort(); } c = fgetc(f); while(c != EOF) { if (c != '>') { printf("Fasta file %s has invalid format\n", fasta_file); abort(); } num_reads++; while((c = fgetc(f)) != '\n' && c != EOF); while((c = fgetc(f)) != '>' && c != EOF); } printf("Found %ld reads\n", num_reads); if (indexed_reads < 0 || indexed_reads > num_reads) indexed_reads = num_reads; 
reads = (uchar **)malloc(num_reads * sizeof(uchar *)); read_length = (int *)malloc(num_reads * sizeof(int)); num_edits = (uchar *)malloc(num_reads * sizeof(uchar)); printf("Reading reads from file %s\n", fasta_file); /* Read the reads from the file */ f = fopen(fasta_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fasta_file); abort(); } c = fgetc(f); while(c != EOF) { j = 0; line[j++] = c; while((c = fgetc(f)) != '\n' && c != EOF && j < BUF_SIZE) { line[j++] = c; } line[j] = '\0'; if (j >= BUF_SIZE || c == EOF || line[0] != '>') { printf("Fasta file %s has invalid format\n", fasta_file); abort(); } j = 0; while((c = fgetc(f)) != '>' && c != EOF && j < BUF_SIZE) { if (c != '\n') line[j++] = c; } line[j] = '\0'; if (j >= BUF_SIZE) { printf("Read length exceeds the maximum.\n"); abort(); } if (j+MAX_INSERTIONS+1+k > BUF_SIZE) { buf4reads = (uchar *)malloc(BUF_SIZE*sizeof(uchar)); if (buf4reads == NULL) { printf("Could not malloc space for reads\n"); abort(); } k = 0; } strncpy((char *)&buf4reads[k], line, j+1); reads[i] = &buf4reads[k]; read_length[i] = j; i++; k += j+1; for(ii = 0; ii < MAX_INSERTIONS; ii++) { buf4reads[k++] = '\0'; } #ifdef DEBUG_Q printf("Original: %s\n", reads[i-1]); #endif } fclose(f); printf("Found %ld reads\n", i); } else if (fastq_file != NULL) { printf("Counting reads in file %s\n", fastq_file); f = fopen(fastq_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fastq_file); abort(); } c = fgetc(f); while(c != EOF) { if (c != '@') { printf("Fastq file %s has invalid format\n", fastq_file); abort(); } num_reads++; /*comment*/ while((c = fgetc(f)) != '\n' && c != EOF); /*read*/ while((c = fgetc(f)) != '\n' && c != EOF); /*comment*/ while((c = fgetc(f)) != '\n' && c != EOF); /*qualities*/ while((c = fgetc(f)) != '\n' && c != EOF); c = fgetc(f); } printf("Found %ld reads\n", num_reads); if (indexed_reads < 0 || indexed_reads > num_reads) indexed_reads = num_reads; reads = (uchar **)malloc(num_reads * sizeof(uchar *)); 
read_length = (int *)malloc(num_reads * sizeof(int)); num_edits = (uchar *)malloc(num_reads * sizeof(uchar)); qual = (char **)malloc(num_reads * sizeof(char *)); printf("Reading reads from file %s\n", fastq_file); /* Read the reads from the file */ f = fopen(fastq_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fastq_file); return 1; } while((c = fgetc(f)) != EOF) { if (c != '@') { printf("Fastq file %s has invalid format\n", fastq_file); exit(1); } /* Skip read id */ while(c != '\n' && c != EOF) c = fgetc(f); /* Bases */ j = 0; while((c = fgetc(f)) != '\n') {if (c == EOF) break; line[j++] = c;} line[j] = '\0'; /* Skip the separating line */ if (fgetc(f) != '+') { printf("Fastq file %s has invalid format\n", fastq_file); exit(1); } while(fgetc(f) != '\n'); /* Qualities */ jj = 0; while((c = fgetc(f)) != '\n') { if (c == EOF) break; line2[jj++] = c-quality_offset; } line2[jj] = '\0'; if (j != jj) { printf("Length of bases and qualities not the same: %ld, %ld\n", j, jj); exit(1); } if (j+MAX_INSERTIONS+1+k > BUF_SIZE) { buf4reads = (uchar *)malloc(BUF_SIZE*sizeof(uchar)); buf4qual = (char *)malloc(BUF_SIZE*sizeof(char)); if (buf4reads == NULL || buf4qual == NULL) { printf("Could not malloc space for reads\n"); abort(); } k = 0; } strncpy((char *)&buf4reads[k], line, j+1); memcpy((char *)&buf4qual[k], line2, j+1); //strncpy((char *)&buf4qual[k], line2, j+1); reads[i] = &buf4reads[k]; qual[i] = &buf4qual[k]; read_length[i] = j; i++; k += j+1; for(ii = 0; ii < MAX_INSERTIONS; ii++) { buf4reads[k] = '\0'; buf4qual[k++] = '\0'; } #ifdef DEBUG_Q printf("Original: %s\n", reads[i-1]); printf(" "); for(jj= 0; jj < j; jj++) printf("%d ", qual[i-1][jj]); printf("\n"); #endif } fclose(f); printf("Found %ld reads\n", i); } else { usage(prog_name); abort(); } correct_errors(reads, read_length, qual, num_reads, q, max_grams, p, num_edits, max_error_rate, threshold, quality_threshold, max_aligned_reads, match_reward, mm_penalty, gap_penalty, stat_file, 
consensus_file, corrected_quality_scheme, corrected_quality, rebuilds, indexed_reads, min_correct_id); print_stats(); printf("Correction finished. Outputting corrected reads.\n"); if (fasta_file) { f = fopen(out_file, "w"); /* Open the original file to read the read names from there */ of = fopen(fasta_file, "r"); if (of == NULL) { printf("Could not open file %s\n", fasta_file); abort(); } c = fgetc(of); for(i = 0; i < num_reads; i++) { /* Read the comment line */ j = 0; line2[j++] = c; while((c = fgetc(of)) != '\n' && c != EOF && j < BUF_SIZE) { line2[j++] = c; } line2[j] = '\0'; if (j >= BUF_SIZE || c == EOF || line2[0] != '>') { printf("Fasta file %s has invalid format\n", fasta_file); abort(); } /* Skip the read */ while((c = fgetc(of)) != '>' && c != EOF); snprintf(line, BUF_SIZE, "%s ( %d edit operations)\n", line2, num_edits[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } snprintf(line, BUF_SIZE, "%s\n", reads[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } } fclose(f); } else if (fastq_file) { f = fopen(out_file, "w"); /* Open the original file to read the comments from there */ of = fopen(fastq_file, "r"); if (of == NULL) { printf("Could not open file %s\n", fastq_file); abort(); } c = fgetc(of); for(i = 0; i < num_reads; i++) { if (c != '@') { printf("Fastq file %s has invalid format\n", fastq_file); abort(); } /*comment*/ j = 0; line2[j++] = c; while((c = fgetc(of)) != '\n' && c != EOF) { line2[j++] = c; } line2[j] = '\0'; /*skip the read*/ while((c = fgetc(of)) != '\n' && c != EOF); snprintf(line, BUF_SIZE, "%s ( %d edit operations)\n", line2, num_edits[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } snprintf(line, BUF_SIZE, "%s\n", reads[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } /*comment*/ j = 0; while((c = fgetc(of)) != '\n' && 
c != EOF) { line2[j++] = c; } line2[j] = '\0'; /*skip qualities*/ while((c = fgetc(of)) != '\n' && c != EOF); c = fgetc(of); snprintf(line, BUF_SIZE, "%s\n", line2); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } for(j = 0; j < (signed)strlen((char *)reads[i]); j++) { line[j] = qual[i][j]+quality_offset; } line[strlen((char *)reads[i])] = '\n'; line[1+strlen((char *)reads[i])] = '\0'; if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } } fclose(f); fclose(of); } return 0; }
workng-parallel.c
#include <stdio.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> #define BODIES 5000 #define TIMESTEPS 100 #define GRAVCONST 0.0000001 // global vars float mass[BODIES]; float vx[BODIES], vy[BODIES]; float x[BODIES], y[BODIES]; float dx, dy, d, F, ax, ay; void testInit(); void testInit2(); void randomInit(); void outputBody(int); int main(int argc, char** argv) { int time, i, j; int mpiId, numMPI; int ompId, numOMP; double timings[10]; int N = BODIES*TIMESTEPS; //init MPI MPI_Init(NULL, NULL); MPI_Comm_rank(MPI_COMM_WORLD, &mpiId); MPI_Comm_size(MPI_COMM_WORLD, &numMPI); //figured out the numLocal; int numLocal = BODIES/numMPI; printf("%d \n", numLocal); #pragma omp parallel default(none) shared(numOMP) { numOMP = omp_get_num_threads(); printf("%d \n", numOMP); } if (N < numMPI*numOMP) { if (mpiId ==0 ) printf("too trivial\n"); // we do not cater for idle cores } if (N%(numMPI*numOMP)!=0){ printf("the array length provided is not evenly divisible between nodes"); MPI_Abort(MPI_COMM_WORLD, 1); } else { if (mpiId == 0){ // testInit2(); timings[0] = omp_get_wtime(); randomInit(); } int start = mpiId*numLocal; printf("start %d \n", start); int end = (mpiId * numLocal) + numLocal; printf("start %d \n", end); //broadcast out the values of the arrays. MPI_Bcast(&vy,BODIES,MPI_FLOAT,0,MPI_COMM_WORLD); MPI_Bcast(&vx,BODIES,MPI_FLOAT,0,MPI_COMM_WORLD); MPI_Bcast(&x,BODIES,MPI_FLOAT,0,MPI_COMM_WORLD); MPI_Bcast(&y,BODIES,MPI_FLOAT,0,MPI_COMM_WORLD); MPI_Bcast(&mass,BODIES,MPI_FLOAT,0,MPI_COMM_WORLD); printf("Broadcast successful"); //this needs to be parallelized with MPI //BCast the variables to the nodes for (time=0; time<TIMESTEPS; time++) { printf("Timestep %d\n",time); timings[2]= omp_get_wtime(); // can this be parallelised by openMP? 
#pragma omp parallel for for (i=start; i<end; i++) { // calc forces on body i due to bodies (j != i) for (j=start; j<end; j++) { if (j != i) { dx = x[j] - x[i]; dy = y[j] - y[i]; d = sqrt(dx*dx + dy*dy); if (d<0.01) { printf("too close - resetting\n"); d=1.0; } F = GRAVCONST * mass[i] * mass[j] / (d*d); ax = (F/mass[i]) * dx/d; ay = (F/mass[i]) * dy/d; vx[i] += ax; vy[i] += ay; } } // body j } // body i //gather the variables back in //MPI_Reduce(&localvx, &vx, numLocal, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); //MPI_Reduce(&localvy, &vy, numLocal, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); timings[3] = omp_get_wtime(); // having worked out all velocities we now apply and determine new position timings[4] = omp_get_wtime(); //openMP the variables. for (i=0; i<BODIES; i++) { x[i] += vx[i]; y[i] += vy[i]; //DEBUG ONLY: outputBody(i); } timings[5] = omp_get_wtime(); printf("---\n"); } // time if (mpiId ==0 ){ printf("Final data\n"); for (i=0; i<BODIES; i++) { outputBody(i); } timings[1] = omp_get_wtime(); printf("Whole execution: %0.6f\n", (timings[1] - timings[0])*1000); printf("timing for init: %0.6f\n", (timings[2] - timings[1])*1000); printf("timing for velocities: %0.6f\n", (timings[3] - timings[2])*1000); printf("timing for new position: %0.6f \n", (timings[5]-timings[4])*1000); } } MPI_Finalize(); } //Parallelise the printing void randomInit() { int i; //#pragma omp parallel for private(i) reduction(+: mass, x, y, vx, vy) shared(BODIES) for (i=0; i<BODIES; i++) { mass[i] = 0.001 + (float)rand()/(float)RAND_MAX; // 0.001 to 1.001 x[i] = -250.0 + 500.0*(float)rand()/(float)RAND_MAX; // -10 to +10 per axis y[i] = -250.0 + 500.0*(float)rand()/(float)RAND_MAX; // vx[i] = -0.2 + 0.4*(float)rand()/(float)RAND_MAX; // -0.25 to +0.25 per axis vy[i] = -0.2 + 0.4*(float)rand()/(float)RAND_MAX; } printf("Randomly initialised\n"); return; } //Can also Parallelise the init function void testInit() { /* test: initial zero velocity ==> attraction only ie bodies should move 
closer together */ int i; for (i=0; i<BODIES; i++) { mass[i] = 1.0; x[i] = (float) i; y[i] = (float) i; vx[i] = 0.0; vy[i] = 0.0; } } void testInit2() { /* test data */ mass[0] = 1.0; x[0]=0.0; y[0]=0.0; vx[0]=0.01; vy[0]=0.0; mass[1] = 0.1; x[1]=1.; y[1]=1.; vx[1]=0.; vy[1]=0.; mass[2] = .001; x[2]=0.;; y[2]=1.; vx[2]=.01; vy[2]=-.01; } void outputBody(int i) { printf("Body %d: Position=(%f,%f) Velocity=(%f,%f)\n", i, x[i],y[i], vx[i],vy[i]); return; }
batch_shuffle_message_manager.h
/** Copyright 2020 Alibaba Group Holding Limited. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef GRAPE_PARALLEL_BATCH_SHUFFLE_MESSAGE_MANAGER_H_ #define GRAPE_PARALLEL_BATCH_SHUFFLE_MESSAGE_MANAGER_H_ #include <memory> #include <thread> #include <vector> #include "grape/communication/sync_comm.h" #include "grape/parallel/message_manager_base.h" #include "grape/utils/vertex_array.h" #include "grape/worker/comm_spec.h" namespace grape { /** * @brief A kind of collective message manager. * * This message manager is designed for the scenario that all mirror vertices' * state need to be override with their masters' state, e.g. PageRank. * * After a round, message manager will encode the inner vertices' state of a * vertex array for each other fragment. * * When receive a batch of messages, message manager will update the state of * outer vertices in a designated vertex array. 
*/ class BatchShuffleMessageManager : public MessageManagerBase { public: BatchShuffleMessageManager() : comm_(NULL_COMM) {} ~BatchShuffleMessageManager() { if (ValidComm(comm_)) { MPI_Comm_free(&comm_); } } /** * @brief Inherit */ void Init(MPI_Comm comm) override { MPI_Comm_dup(comm, &comm_); comm_spec_.Init(comm_); fid_ = comm_spec_.fid(); fnum_ = comm_spec_.fnum(); force_terminate_ = false; terminate_info_.Init(fnum_); shuffle_out_buffers_.resize(fnum_); recv_thread_ = std::thread(&BatchShuffleMessageManager::recvThreadRoutine, this); } /** * @brief Inherit */ void Start() override {} /** * @brief Inherit */ void StartARound() override { msg_size_ = 0; to_terminate_ = true; } /** * @brief Inherit */ void FinishARound() override {} /** * @brief Inherit */ bool ToTerminate() override { int flag = force_terminate_ ? 1 : 0; int ret; MPI_Allreduce(&flag, &ret, 1, MPI_INT, MPI_SUM, comm_); if (ret > 0) { terminate_info_.success = false; AllToAll(terminate_info_.info, comm_); return true; } return to_terminate_; } /** * @brief Inherit */ size_t GetMsgSize() const override { return msg_size_; } /** * @brief Inherit */ void Finalize() override { if (!send_reqs_.empty()) { MPI_Waitall(send_reqs_.size(), &send_reqs_[0], MPI_STATUSES_IGNORE); send_reqs_.clear(); } if (!recv_reqs_.empty()) { MPI_Waitall(recv_reqs_.size(), &recv_reqs_[0], MPI_STATUSES_IGNORE); recv_reqs_.clear(); } { size_t v = 1; MPI_Send(&v, sizeof(size_t), MPI_CHAR, comm_spec_.FragToWorker(fid_), 1, comm_); recv_thread_.join(); } MPI_Comm_free(&comm_); comm_ = NULL_COMM; } /** * @brief Synchronize the inner vertices' data of a vertex array to their * mirrors. * * @tparam GRAPH_T * @tparam DATA_T * @param frag * @param data_out The inner vertices data of data_out will be sent. * @param data_in The outer vertices data of data_in will be updated. 
*/ template <typename GRAPH_T, typename DATA_T> void SyncInnerVertices( const GRAPH_T& frag, const VertexArray<DATA_T, typename GRAPH_T::vid_t>& data_out, VertexArray<DATA_T, typename GRAPH_T::vid_t>& data_in, int thread_num = std::thread::hardware_concurrency()) { to_terminate_ = false; if (!send_reqs_.empty()) { MPI_Waitall(send_reqs_.size(), &send_reqs_[0], MPI_STATUSES_IGNORE); send_reqs_.clear(); } if (!recv_reqs_.empty()) { MPI_Waitall(recv_reqs_.size(), &recv_reqs_[0], MPI_STATUSES_IGNORE); recv_reqs_.clear(); recv_from_.clear(); } for (fid_t i = 1; i < fnum_; ++i) { fid_t src_fid = (fid_ + fnum_ - i) % fnum_; auto range = frag.OuterVertices(src_fid); MPI_Request req; MPI_Irecv(&data_in[range.begin()], range.size() * sizeof(DATA_T), MPI_CHAR, comm_spec_.FragToWorker(src_fid), 0, comm_, &req); recv_reqs_.push_back(req); recv_from_.push_back(src_fid); } remaining_reqs_ = fnum_ - 1; for (fid_t i = 1; i < fnum_; ++i) { fid_t dst_fid = (i + fid_) % fnum_; auto& id_vec = frag.MirrorVertices(dst_fid); auto& vec = shuffle_out_buffers_[dst_fid]; vec.clear(); vec.resize(id_vec.size() * sizeof(DATA_T)); DATA_T* buf = reinterpret_cast<DATA_T*>(vec.data()); size_t num = id_vec.size(); #pragma omp parallel for num_threads(thread_num) for (size_t k = 0; k < num; ++k) { buf[k] = data_out[id_vec[k]]; } MPI_Request req; MPI_Isend(vec.data(), vec.size(), MPI_CHAR, comm_spec_.FragToWorker(dst_fid), 0, comm_, &req); msg_size_ += vec.size(); send_reqs_.push_back(req); } } /** * @brief Synchronize the inner vertices' data of a vertex array to their * mirrors. The data_out and data_in are the same vertex array. 
* * @tparam GRAPH_T * @tparam DATA_T * @param frag * @param data */ template <typename GRAPH_T, typename DATA_T> void SyncInnerVertices(const GRAPH_T& frag, VertexArray<DATA_T, typename GRAPH_T::vid_t>& data, int thread_num = std::thread::hardware_concurrency()) { to_terminate_ = false; if (!send_reqs_.empty()) { MPI_Waitall(send_reqs_.size(), &send_reqs_[0], MPI_STATUSES_IGNORE); send_reqs_.clear(); } if (!recv_reqs_.empty()) { MPI_Waitall(recv_reqs_.size(), &recv_reqs_[0], MPI_STATUSES_IGNORE); recv_reqs_.clear(); recv_from_.clear(); } for (fid_t i = 1; i < fnum_; ++i) { fid_t src_fid = (fid_ + fnum_ - i) % fnum_; auto range = frag.OuterVertices(src_fid); MPI_Request req; MPI_Irecv(&data[range.begin()], range.size() * sizeof(DATA_T), MPI_CHAR, comm_spec_.FragToWorker(src_fid), 0, comm_, &req); recv_reqs_.push_back(req); recv_from_.push_back(src_fid); } remaining_reqs_ = fnum_ - 1; for (fid_t i = 1; i < fnum_; ++i) { fid_t dst_fid = (i + fid_) % fnum_; auto& id_vec = frag.MirrorVertices(dst_fid); auto& vec = shuffle_out_buffers_[dst_fid]; vec.clear(); vec.resize(id_vec.size() * sizeof(DATA_T)); DATA_T* buf = reinterpret_cast<DATA_T*>(vec.data()); size_t num = id_vec.size(); #pragma omp parallel for num_threads(thread_num) for (size_t k = 0; k < num; ++k) { buf[k] = data[id_vec[k]]; } MPI_Request req; MPI_Isend(vec.data(), vec.size(), MPI_CHAR, comm_spec_.FragToWorker(dst_fid), 0, comm_, &req); msg_size_ += vec.size(); send_reqs_.push_back(req); } } /** * @brief This function will block until all outer vertices are updated, that * is, messages from all other fragments are received. */ void UpdateOuterVertices() { MPI_Waitall(recv_reqs_.size(), &recv_reqs_[0], MPI_STATUSES_IGNORE); remaining_reqs_ = 0; recv_reqs_.clear(); recv_from_.clear(); } /** * @brief This function will block until a set of messages from one fragment * are received. * * @return Source fragment id. 
*/ fid_t UpdatePartialOuterVertices() { int index; fid_t ret; MPI_Waitany(recv_reqs_.size(), &recv_reqs_[0], &index, MPI_STATUS_IGNORE); remaining_reqs_--; ret = recv_from_[index]; if (remaining_reqs_ == 0) { recv_reqs_.clear(); recv_from_.clear(); } return ret; } /** * @brief Inherit */ void ForceContinue() override {} /** * @brief Inherit */ void ForceTerminate(const std::string& terminate_info) override { force_terminate_ = true; terminate_info_.info[comm_spec_.fid()] = terminate_info; } /** * @brief Inherit */ const TerminateInfo& GetTerminateInfo() const override { return terminate_info_; } private: void recvThreadRoutine() { std::vector<MPI_Request> recv_thread_reqs(fnum_); std::vector<size_t> numbers(fnum_); for (fid_t src_fid = 0; src_fid < fnum_; ++src_fid) { MPI_Irecv(&numbers[src_fid], sizeof(size_t), MPI_CHAR, comm_spec_.FragToWorker(src_fid), 1, comm_, &recv_thread_reqs[src_fid]); } int index; MPI_Waitany(fnum_, &recv_thread_reqs[0], &index, MPI_STATUS_IGNORE); CHECK(index == static_cast<int>(fid_)); for (fid_t src_fid = 0; src_fid < fnum_; ++src_fid) { if (src_fid != fid_) { MPI_Cancel(&recv_thread_reqs[src_fid]); } } } fid_t fid_; fid_t fnum_; CommSpec comm_spec_; MPI_Comm comm_; std::vector<std::vector<char>> shuffle_out_buffers_; std::vector<MPI_Request> recv_reqs_; std::vector<fid_t> recv_from_; fid_t remaining_reqs_; std::vector<MPI_Request> send_reqs_; size_t msg_size_; std::thread recv_thread_; bool to_terminate_; bool force_terminate_; TerminateInfo terminate_info_; }; } // namespace grape #endif // GRAPE_PARALLEL_BATCH_SHUFFLE_MESSAGE_MANAGER_H_
dnnl_common.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file dnnl_common.h
 * \brief Common header file for DNNL backend subgraph
 * \author Ciyong Chen
 */

#ifndef MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_COMMON_H_
#define MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_COMMON_H_
#if MXNET_USE_ONEDNN == 1

#include <vector>

#include "operator/numpy/np_matrix_op-inl.h"

namespace mxnet {
namespace op {

/*!
 * \brief Compute int8 quantization scale(s) for a weight tensor.
 *
 * The weight is treated as [out_channel, rest...]: wshape[0] channels, each
 * spanning ProdShape(1, ndim) contiguous elements.  Per-channel min/max are
 * gathered first, then either one scale per channel or a single whole-tensor
 * scale is derived.
 *
 * \param weight weight NDArray to be quantized to int8
 * \param bias optional bias (may be nullptr); when a channel's bias is
 *        non-zero, its scale is capped so bias*scale*data_scale stays within
 *        roughly INT32_MAX/2 (see TODO below about a dnnl INT_MAX issue)
 * \param data_scale quantization scale already chosen for the input data
 * \param weight_channelwise_scale if true, returns one scale per channel;
 *        otherwise returns a 3-element vector {scale, total_min, total_max}
 * \return channel-wise scales, or {scale, min, max} for tensor-wise mode
 */
template <typename DType>
static std::vector<float> GetWeightScales(const NDArray& weight,
                                          const NDArray* bias,
                                          const float data_scale,
                                          bool weight_channelwise_scale) {
  auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  std::vector<float> weight_scales;
  const DType* weight_ptr = weight.data().dptr<DType>();
  const DType* bias_ptr   = bias ? bias->data().dptr<DType>() : nullptr;

  const auto wshape = weight.shape();
  size_t channel = wshape[0];
  // number of weight elements per output channel
  size_t offset = wshape.ProdShape(1, wshape.ndim());

  // per-channel extrema, seeded so any real value replaces them
  std::vector<DType> weight_c_min(channel, MaxValue<DType>());
  std::vector<DType> weight_c_max(channel, MinValue<DType>());
  for (int c = 0; c < static_cast<int>(channel); ++c) {
    const DType* p1 = weight_ptr + c * offset;
    for (size_t k = 0; k < offset; ++k) {
      if (weight_c_min[c] > p1[k])
        weight_c_min[c] = p1[k];
      if (weight_c_max[c] < p1[k])
        weight_c_max[c] = p1[k];
    }
  }

  if (weight_channelwise_scale) {
    weight_scales.resize(channel);
#pragma omp parallel for num_threads(nthreads)
    for (int c = 0; c < static_cast<int>(channel); ++c) {
      float scale = GetQuantizeScale(mshadow::kInt8, weight_c_min[c], weight_c_max[c]);
      if (bias_ptr && bias_ptr[c]) {
        // avoid overflow on bias
        // TODO(zhennan): dnnl has bug to handle INT_MAX in bias, so set the maximum value of bias
        // to INT_MAX / 2.
        float scale_max =
            static_cast<float>(bias_ptr[c] > 0 ? MaxValue<int32_t>() : MinValue<int32_t>()) / 2 /
            bias_ptr[c] / data_scale;
        scale = Min(scale, scale_max);
      }
      weight_scales[c] = scale;
    }
  } else {
    // tensor-wise: reduce the per-channel extrema to global min/max
    DType total_min = weight_c_min[0];
    DType total_max = weight_c_max[0];
    for (size_t c = 0; c < channel; ++c) {
      if (total_min > weight_c_min[c])
        total_min = weight_c_min[c];
      if (total_max < weight_c_max[c])
        total_max = weight_c_max[c];
    }
    // pack {scale, min, max} so callers can recover the value range
    weight_scales.resize(3);
    weight_scales[0] = GetQuantizeScale(mshadow::kInt8, total_min, total_max);
    weight_scales[1] = total_min;
    weight_scales[2] = total_max;
  }
  return weight_scales;
}

/*!
 * \brief Reorder (and, when scales are given, quantize) weight/bias into the
 *        DNNL memory layouts described by weight_md / bias_md.
 *
 * Registers dnnl::reorder primitives on the DNNL stream; when \p submit is
 * true the stream is submitted before *weight (and *bias) are replaced with
 * the converted NDArrays.  The bias path runs only when has_bias &&
 * data_scale != 0; bias scales are weight_scale * data_scale per channel.
 *
 * NOTE(review): when submit == false the caller appears responsible for
 * submitting the stream before the reorder results are used — confirm at
 * call sites.
 */
static inline void ConvertWeightBias2DNNL(NDArray* weight,
                                          NDArray* bias,
                                          bool has_bias,
                                          const dnnl::memory::desc& weight_md,
                                          const dnnl::memory::desc* bias_md,
                                          const int num_group,
                                          float data_scale,
                                          const std::vector<float>& weight_scales,
                                          const bool submit = true) {
  DNNLStream* stream = DNNLStream::Get();
  const auto new_weight          = NDArray(weight_md);
  const auto conv_weights_memory = new_weight.GetDNNLData();
  dnnl::primitive_attr weight_attr;
  if (weight_scales.size()) {
    // mask 0 = one common scale, mask 1 = per-output-channel scales
    const int weight_mask = (weight_scales.size()) == 1 ? 0 : 1;
    weight_attr.set_output_scales(weight_mask, weight_scales);
  }
  // group-aware view of the weights; fall back to the plain DNNL data handle
  auto default_weights_memory = GetWeights(*weight, num_group);
  if (default_weights_memory == nullptr)
    default_weights_memory = weight->GetDNNLData();
  const auto weight_reorder_pd =
      dnnl::reorder::primitive_desc(*default_weights_memory, *conv_weights_memory, weight_attr);
  DNNLStream::Get()->RegisterPrimArgs(
      dnnl::reorder(weight_reorder_pd),
      {{DNNL_ARG_FROM, *default_weights_memory}, {DNNL_ARG_TO, *conv_weights_memory}});

  NDArray new_bias;
  if (has_bias && data_scale) {
    // bias is quantized with the combined weight*data scales
    std::vector<float> bias_scales(weight_scales.size());
    for (size_t c = 0; c < weight_scales.size(); ++c) {
      bias_scales[c] = weight_scales[c] * data_scale;
    }
    new_bias                    = NDArray(*bias_md);
    const auto conv_bias_memory = new_bias.GetDNNLData();
    const int bias_mask         = (bias_scales.size()) == 1 ? 0 : 1;
    dnnl::primitive_attr bias_attr;
    bias_attr.set_output_scales(bias_mask, bias_scales);
    auto bias_weights_memory = bias->GetDNNLData();
    const auto bias_reorder_pd =
        dnnl::reorder::primitive_desc(*bias_weights_memory, *conv_bias_memory, bias_attr);
    DNNLStream::Get()->RegisterPrimArgs(
        dnnl::reorder(bias_reorder_pd),
        {{DNNL_ARG_FROM, *bias_weights_memory}, {DNNL_ARG_TO, *conv_bias_memory}});
  }
  if (submit)
    stream->Submit();
  // swap in the converted arrays only after the reorders were registered
  // (and, when submit, executed)
  *weight = new_weight;
  if (has_bias && data_scale)
    *bias = new_bias;
}

/*!
 * \brief Check that a reshape node consumes split output \p out_index and its
 *        newshape is 4-dimensional with the first two dims both -2
 *        (keep-dimension markers of NumpyXReshape).
 */
static inline bool CheckReshapeConditions(const nnvm::Node& node, const index_t out_index) {
  const index_t split_output_index = node.inputs[0].index;
  if (split_output_index != out_index)
    return false;

  const auto& reshape_param = nnvm::get<NumpyXReshapeParam>(node.attrs.parsed);
  const auto newshape       = reshape_param.newshape;
  if (newshape.ndim() != 4 || !(newshape[0] == newshape[1] && newshape[0] == -2))
    return false;

  return true;
}

/*!
 * \brief Check that a SwapAxis node swaps axes 1 and 2 (in either order);
 *        both "dim1" and "dim2" attributes must be present.
 */
static inline bool CheckSwapAxisConditions(const nnvm::Node& node) {
  auto params = node.attrs.dict;
  int dim1 = 0, dim2 = 0;
  if (params.count("dim1") && params.count("dim2")) {
    dim1 = std::stoi(params.at("dim1"));
    dim2 = std::stoi(params.at("dim2"));
  } else {
    return false;
  }

  return ((dim1 == 1 && dim2 == 2) || (dim1 == 2 && dim2 == 1));
}

}  // namespace op
}  // namespace mxnet

#endif  // if MXNET_USE_ONEDNN == 1
#endif  // MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_COMMON_H_
munit.c
/* Copyright (c) 2013-2017 Evan Nemerson <evan@nemerson.com> * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /*** Configuration ***/ /* This is just where the output from the test goes. It's really just * meant to let you choose stdout or stderr, but if anyone really want * to direct it to a file let me know, it would be fairly easy to * support. */ #if !defined(MUNIT_OUTPUT_FILE) # define MUNIT_OUTPUT_FILE stdout #endif /* This is a bit more useful; it tells µnit how to format the seconds in * timed tests. If your tests run for longer you might want to reduce * it, and if your computer is really fast and your tests are tiny you * can increase it. */ #if !defined(MUNIT_TEST_TIME_FORMAT) # define MUNIT_TEST_TIME_FORMAT "0.8f" #endif /* If you have long test names you might want to consider bumping * this. The result information takes 43 characters. 
*/ #if !defined(MUNIT_TEST_NAME_LEN) # define MUNIT_TEST_NAME_LEN 37 #endif /* If you don't like the timing information, you can disable it by * defining MUNIT_DISABLE_TIMING. */ #if !defined(MUNIT_DISABLE_TIMING) # define MUNIT_ENABLE_TIMING #endif /*** End configuration ***/ #if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L) # undef _POSIX_C_SOURCE #endif #if !defined(_POSIX_C_SOURCE) # define _POSIX_C_SOURCE 200809L #endif /* Solaris freaks out if you try to use a POSIX or SUS standard without * the "right" C standard. */ #if defined(_XOPEN_SOURCE) # undef _XOPEN_SOURCE #endif #if defined(__STDC_VERSION__) # if __STDC_VERSION__ >= 201112L # define _XOPEN_SOURCE 700 # elif __STDC_VERSION__ >= 199901L # define _XOPEN_SOURCE 600 # endif #endif /* Because, according to Microsoft, POSIX is deprecated. You've got * to appreciate the chutzpah. */ #if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE) # define _CRT_NONSTDC_NO_DEPRECATE #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) # include <stdbool.h> #elif defined(_WIN32) /* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */ #endif #include <limits.h> #include <time.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <setjmp.h> #if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32) #define MUNIT_NL_LANGINFO #include <locale.h> #include <langinfo.h> #include <strings.h> #endif #if !defined(_WIN32) # include <unistd.h> # include <sys/types.h> # include <sys/wait.h> #else # include <windows.h> # include <io.h> # include <fcntl.h> # if !defined(STDERR_FILENO) # define STDERR_FILENO _fileno(stderr) # endif #endif #include "munit.h" #define MUNIT_STRINGIFY(x) #x #define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x) #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local) # define MUNIT_THREAD_LOCAL _Thread_local #elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || 
defined(__IBMCPP__)
#  define MUNIT_THREAD_LOCAL __thread
#elif defined(_WIN32)
#  define MUNIT_THREAD_LOCAL __declspec(thread)
#endif

/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
 * while (0)', or 'do { ... } while (true)'.  I'm pretty sure nobody
 * at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif

/*** Logging ***/

/* Messages below munit_log_level_visible are suppressed; messages at or
 * above munit_log_level_fatal abort the current test. */
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;

#if defined(MUNIT_THREAD_LOCAL)
/* Per-thread jump target used to unwind out of a failing test instead of
 * aborting the whole process (only when TLS is available). */
static MUNIT_THREAD_LOCAL bool munit_error_jmp_buf_valid = false;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif

/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicity *not* set
 * because it will then choke on our attempts to use the MS-specific
 * I64 modifier for size_t (which we have to use since MSVC doesn't
 * support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/* Core formatter: writes "<Level>: [file:line: ]message\n" to fp, filtered
 * by munit_log_level_visible.  An invalid level is itself reported through
 * munit_logf_ex (and is therefore fatal by default). */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  if (level < munit_log_level_visible)
    return;

  switch (level) {
    case MUNIT_LOG_DEBUG:
      fputs("Debug", fp);
      break;
    case MUNIT_LOG_INFO:
      fputs("Info", fp);
      break;
    case MUNIT_LOG_WARNING:
      fputs("Warning", fp);
      break;
    case MUNIT_LOG_ERROR:
      fputs("Error", fp);
      break;
    default:
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}

/* printf-style convenience wrapper around munit_logf_exv with no
 * file/line information. */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...)
{
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, fp, NULL, 0, format, ap);
  va_end(ap);
}

/* Log a pre-formatted message (no varargs). */
static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message)
{
  munit_logf_internal(level, fp, "%s", message);
}

/* Public logging entry point.  Fatal levels longjmp back into the test
 * harness when a valid jump buffer exists; otherwise abort(). */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}

/* Report a test failure and unwind (longjmp if available, else abort). */
void
munit_errorf_ex(const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
#  define MUNIT_STRERROR_LEN 80
#endif

/* Log msg together with a textual description of errno, choosing whichever
 * strerror variant the platform provides (plain strerror, strerror_r, or
 * MSVC's strerror_s). */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

/* Zero-initialized allocation helper; a failed allocation is reported as a
 * fatal test error.  size == 0 returns NULL by design. */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr;

  if (size == 0)
    return NULL;

  ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL)) {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}

/*** Timer code ***/

/* This section is definitely a bit messy, patches to
clean it up
 * gratefully accepted. */

/* Identifiers for the available CPU-time clock implementations. */
#define MUNIT_CPU_TIME_METHOD_CLOCK_GETTIME 0
#define MUNIT_CPU_TIME_METHOD_CLOCK 1
#define MUNIT_CPU_TIME_METHOD_GETPROCESSTIMES 2
#define MUNIT_CPU_TIME_METHOD_GETRUSAGE 3

/* Identifiers for the available wall-clock implementations. */
#define MUNIT_WALL_TIME_METHOD_CLOCK_GETTIME 8
#define MUNIT_WALL_TIME_METHOD_GETTIMEOFDAY 9
#define MUNIT_WALL_TIME_METHOD_QUERYPERFORMANCECOUNTER 10
#define MUNIT_WALL_TIME_METHOD_MACH_ABSOLUTE_TIME 11

/* clock_gettime gives us a good high-resolution timer, but on some
 * platforms you have to link in librt.  I don't want to force a
 * complicated build system, so by default we'll only use
 * clock_gettime on C libraries where we know the standard c library
 * is sufficient.  If you would like to test for librt in your build
 * system and add it if necessary, you can define
 * MUNIT_ALLOW_CLOCK_GETTIME and we'll assume that the necessary
 * libraries are available. */
#if !defined(MUNIT_ALLOW_CLOCK_GETTIME)
# if defined(__GLIBC__) && defined(__GLIBC_MINOR__)
#  if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17)
#   define MUNIT_ALLOW_CLOCK_GETTIME
#  endif
# endif
#endif

/* Solaris advertises _POSIX_TIMERS, and defines
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_VIRTUAL, but doesn't actually
 * implement them.  Mingw requires you to link to pthreads instead of
 * librt (or just libc). */

/* Pick exactly one CPU-time method and one wall-clock method for this
 * platform; everything below dispatches on these two macros. */
#if defined(MUNIT_ALLOW_CLOCK_GETTIME) && ((defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)) && !defined(__sun))
#  define MUNIT_CPU_TIME_METHOD MUNIT_CPU_TIME_METHOD_CLOCK_GETTIME
#  define MUNIT_WALL_TIME_METHOD MUNIT_WALL_TIME_METHOD_CLOCK_GETTIME
#elif defined(_WIN32)
#  define MUNIT_CPU_TIME_METHOD MUNIT_CPU_TIME_METHOD_GETPROCESSTIMES
#  define MUNIT_WALL_TIME_METHOD MUNIT_WALL_TIME_METHOD_QUERYPERFORMANCECOUNTER
#elif defined(__MACH__)
#  define MUNIT_CPU_TIME_METHOD MUNIT_CPU_TIME_METHOD_GETRUSAGE
#  define MUNIT_WALL_TIME_METHOD MUNIT_WALL_TIME_METHOD_MACH_ABSOLUTE_TIME
#else
#  define MUNIT_CPU_TIME_METHOD MUNIT_CPU_TIME_METHOD_GETRUSAGE
#  define MUNIT_WALL_TIME_METHOD MUNIT_WALL_TIME_METHOD_GETTIMEOFDAY
#endif

/* Per-method CPU clock state type, plus the headers it requires. */
#if MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_CLOCK_GETTIME
#include <time.h>
typedef struct timespec MunitCpuClock;
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_CLOCK
#include <time.h>
typedef clock_t MunitCpuClock;
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_GETPROCESSTIMES
#include <windows.h>
typedef FILETIME MunitCpuClock;
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_GETRUSAGE
#include <sys/time.h>
#include <sys/resource.h>
typedef struct rusage MunitCpuClock;
#endif

/* Per-method wall clock state type, plus the headers it requires. */
#if MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_CLOCK_GETTIME
#include <time.h>
typedef struct timespec MunitWallClock;
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_GETTIMEOFDAY
typedef struct timeval MunitWallClock;
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_QUERYPERFORMANCECOUNTER
typedef LARGE_INTEGER MunitWallClock;
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_MACH_ABSOLUTE_TIME
#include <mach/mach.h>
#include <mach/mach_time.h>
typedef munit_uint64_t MunitWallClock;
#endif

/* Read the current wall-clock time into *wallclock; exits the process on
 * failure.  Defined unconditionally (not under MUNIT_ENABLE_TIMING)
 * because the PRNG seed generator also uses it. */
static void
munit_wall_clock_get_time(MunitWallClock* wallclock) {
#if MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_CLOCK_GETTIME
  if (clock_gettime(CLOCK_MONOTONIC, wallclock) != 0) {
    fputs("Unable to get wall clock time\n", stderr);
exit(EXIT_FAILURE);
  }
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_QUERYPERFORMANCECOUNTER
  if (QueryPerformanceCounter(wallclock) == 0) {
    fputs("Unable to get wall clock time\n", stderr);
    exit(EXIT_FAILURE);
  }
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_GETTIMEOFDAY
  if (gettimeofday(wallclock, NULL) != 0) {
    fputs("Unable to get wall clock time\n", stderr);
    exit(EXIT_FAILURE);
  }
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_MACH_ABSOLUTE_TIME
  *wallclock = mach_absolute_time();
#endif
}

#if defined(MUNIT_ENABLE_TIMING)

/* Read the per-process CPU time into *cpuclock; exits on failure. */
static void
munit_cpu_clock_get_time(MunitCpuClock* cpuclock) {
#if MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_CLOCK_GETTIME
  /* Prefer the POSIX per-process clock; fall back to Solaris-style
   * CLOCK_VIRTUAL; fail the build if neither exists. */
  static const clockid_t clock_id =
#if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID)
    CLOCK_PROCESS_CPUTIME_ID
#elif defined(CLOCK_VIRTUAL)
    CLOCK_VIRTUAL
#else
#error No clock found
#endif
    ;

  if (clock_gettime(clock_id, cpuclock) != 0) {
    fprintf(stderr, "Unable to get CPU clock time: %s\n", strerror(errno));
    exit(EXIT_FAILURE);
  }
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_GETPROCESSTIMES
  /* Only the user-time output parameter is kept; the other three are
   * required by the API but discarded. */
  FILETIME CreationTime, ExitTime, KernelTime;
  if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, cpuclock)) {
    fputs("Unable to get CPU clock time\n", stderr);
    exit(EXIT_FAILURE);
  }
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_CLOCK
  *cpuclock = clock();
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_GETRUSAGE
  if (getrusage(RUSAGE_SELF, cpuclock) != 0) {
    fputs("Unable to get CPU clock time\n", stderr);
    exit(EXIT_FAILURE);
  }
#endif
}

/* Return (end - start) in seconds for two wall-clock samples. */
static double
munit_wall_clock_get_elapsed(MunitWallClock* start, MunitWallClock* end) {
#if MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_CLOCK_GETTIME
  return
    (double) (end->tv_sec - start->tv_sec) +
    (((double) (end->tv_nsec - start->tv_nsec)) / 1000000000);
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER Frequency;
  LONGLONG elapsed_ticks;
  QueryPerformanceFrequency(&Frequency);
  elapsed_ticks = end->QuadPart - start->QuadPart;
  return ((double) elapsed_ticks) / ((double) Frequency.QuadPart);
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_GETTIMEOFDAY
  return
    (double) (end->tv_sec - start->tv_sec) +
    (((double) (end->tv_usec - start->tv_usec)) / 1000000);
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_MACH_ABSOLUTE_TIME
  /* mach_absolute_time ticks are converted to ns via the cached timebase
   * ratio; the timebase is queried lazily on first use. */
  static mach_timebase_info_data_t timebase_info = { 0, 0 };
  if (timebase_info.denom == 0)
    (void) mach_timebase_info(&timebase_info);

  return ((*end - *start) * timebase_info.numer / timebase_info.denom) / 1000000000.0;
#endif
}

/* Return (end - start) of consumed CPU time, in seconds. */
static double
munit_cpu_clock_get_elapsed(MunitCpuClock* start, MunitCpuClock* end) {
#if MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_CLOCK_GETTIME
  return
    (double) (end->tv_sec - start->tv_sec) +
    (((double) (end->tv_nsec - start->tv_nsec)) / 1000000000);
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_GETPROCESSTIMES
  /* FILETIME is a split 64-bit count of 100ns units; reassemble both
   * samples before subtracting. */
  ULONGLONG start_cpu, end_cpu;

  start_cpu = start->dwHighDateTime;
  start_cpu <<= sizeof(DWORD) * 8;
  start_cpu |= start->dwLowDateTime;

  end_cpu = end->dwHighDateTime;
  end_cpu <<= sizeof(DWORD) * 8;
  end_cpu |= end->dwLowDateTime;

  return ((double) (end_cpu - start_cpu)) / 10000000;
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_CLOCK
  return ((double) (*end - *start)) / CLOCKS_PER_SEC;
#elif MUNIT_CPU_TIME_METHOD == MUNIT_CPU_TIME_METHOD_GETRUSAGE
  /* CPU time here is user + system time combined. */
  return
    (double) ((end->ru_utime.tv_sec + end->ru_stime.tv_sec) -
              (start->ru_utime.tv_sec + start->ru_stime.tv_sec)) +
    (((double) ((end->ru_utime.tv_usec + end->ru_stime.tv_usec) -
                (start->ru_utime.tv_usec + start->ru_stime.tv_usec))) / 1000000);
#endif
}

#endif /* MUNIT_ENABLE_TIMING */

/*** PRNG stuff ***/

/* This is (unless I screwed up, which is entirely possible) the
 * version of PCG with 32-bit state.  It was chosen because it has a
 * small enough state that we should reliably be able to use CAS
 * instead of requiring a lock for thread-safety.
 *
 * If I did screw up, I probably will not bother changing it unless
 * there is a significant bias.  It's really not important this be
 * particularly strong, as long as it is fairly random it's much more
 * important that it be reproducible, so bug reports have a better
 * chance of being reproducible. */

/* Detect an atomics implementation: C11 <stdatomic.h> first, then the
 * clang __c11_* builtins. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#  define HAVE_STDATOMIC
#elif defined(__clang__)
#  if __has_extension(c_atomic)
#    define HAVE_CLANG_ATOMICS
#  endif
#endif

/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
#  undef HAVE_STDATOMIC
#  if defined(__c2__)
#    undef HAVE_CLANG_ATOMICS
#  endif
#endif

/* The storage type for the shared PRNG state, per atomics backend. */
#if defined(_OPENMP)
#  define ATOMIC_UINT32_T uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
#  include <stdatomic.h>
#  define ATOMIC_UINT32_T _Atomic uint32_t
#  define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
#  define ATOMIC_UINT32_T _Atomic uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
#  define ATOMIC_UINT32_T volatile LONG
#  define ATOMIC_UINT32_INIT(x) (x)
#else
#  define ATOMIC_UINT32_T volatile uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#endif

/* Global PRNG state shared by all threads; updated via CAS loops below. */
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);

#if defined(_OPENMP)
/* OpenMP backend: serialize all access through one named critical section. */
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
#pragma omp critical (munit_atomics)
  *dest = value;
}

static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  int ret;
#pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}

static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  bool ret;

#pragma omp critical (munit_atomics)
  {
    if (*dest == *expected) {
      *dest = desired;
      ret = true;
    } else {
      ret = false;
    }
  }

  return ret;
}
#elif defined(HAVE_STDATOMIC)
#  define munit_atomic_store(dest, value) atomic_store(dest, value)
#  define munit_atomic_load(src) atomic_load(src)
#  define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
#  define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* NOTE(review): the condition below relies on && binding tighter than ||;
 * it evaluates as intended for real GCC versions, but extra parentheses
 * around the version test would make the intent explicit — TODO confirm. */
#elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#  define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
#  warning No atomic implementation, PRNG will not be thread-safe
#  define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
/* Fallback CAS with no atomicity guarantee (single-threaded use only). */
static inline bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  if (*dest == *expected) {
    *dest = desired;
    return true;
  } else {
    return false;
  }
}
#endif

/* PCG LCG constants (32-bit state). */
#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

/* Advance the LCG state by one step (wrapping 32-bit arithmetic). */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT;
}

static
munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  /* PCG output permutation: variable-shift xorshift followed by a
   * multiply and a final xorshift. */
  munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U);
  res ^= res >> 22;
  return res;
}

/* Re-seed the global PRNG.  The seed is run through one LCG step so that
 * nearby seeds don't produce nearby states. */
void
munit_rand_seed(munit_uint32_t seed) {
  munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, state);
}

/* Derive a fresh seed from the wall clock (sub-second component where
 * available, so consecutive runs differ). */
static munit_uint32_t
munit_rand_generate_seed(void) {
  MunitWallClock wc;
  munit_uint32_t seed, state;

  munit_wall_clock_get_time(&wc);
#if MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_CLOCK_GETTIME
  seed = (munit_uint32_t) wc.tv_nsec;
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_QUERYPERFORMANCECOUNTER
  seed = (munit_uint32_t) wc.QuadPart;
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_GETTIMEOFDAY
  seed = (munit_uint32_t) wc.tv_usec;
#elif MUNIT_WALL_TIME_METHOD == MUNIT_WALL_TIME_METHOD_MACH_ABSOLUTE_TIME
  seed = (munit_uint32_t) wc;
#endif

  state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);

  return munit_rand_from_state(state);
}

/* Draw one value from a caller-owned (non-shared) state; advances *state. */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t old = *state;
  *state = munit_rand_next_state(old);
  return munit_rand_from_state(old);
}

/* Draw one value from the shared global state; a CAS retry loop makes the
 * state advance atomic with respect to other threads. */
munit_uint32_t
munit_rand_uint32(void) {
  munit_uint32_t old, state;

  do {
    old = munit_atomic_load(&munit_rand_state);
    state = munit_rand_next_state(old);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return munit_rand_from_state(old);
}

/* Fill `data` with `size` random bytes drawn from *state: whole 32-bit
 * words first, then a partial word for the remainder. */
static void
munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  size_t members_remaining = size / sizeof(munit_uint32_t);
  size_t bytes_remaining = size % sizeof(munit_uint32_t);
  munit_uint8_t* b = data;
  munit_uint32_t rv;

  while (members_remaining-- > 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, sizeof(munit_uint32_t));
    b += sizeof(munit_uint32_t);
  }

  if (bytes_remaining != 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, bytes_remaining);
  }
}

/* Public variant of the above, operating on the shared global state via a
 * CAS retry loop (the buffer may be rewritten if the CAS fails). */
void
munit_rand_memory(size_t size,
munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint32_t old, state;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    munit_rand_state_memory(&state, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
}

/* Uniform value in [0, max] (inclusive), free of modulo bias: values below
 * `min` are rejected before reducing.  `salt` is XORed in so different
 * call sites diverge even with identical state. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
   * as (UINT32_MAX + 1 - max) % max = -max % max.  We compute -max using not
   * to avoid compiler warnings. */
  const munit_uint32_t min = (~max + 1U) % max;
  munit_uint32_t x;

  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  max++;

  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);

  return x % max;
}

/* Shared-state wrapper for munit_rand_state_at_most (CAS retry loop). */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t old, state;
  munit_uint32_t retval;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    retval = munit_rand_state_at_most(&state, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

/* Uniform int in [min, max] (inclusive); arguments may be given in either
 * order.  The span is clamped to 32 bits if it exceeds UINT32_MAX. */
int
munit_rand_int_range(int min, int max) {
  munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;

  if (min > max)
    return munit_rand_int_range(max, min);

  if (range > (~((munit_uint32_t) 0U)))
    range = (~((munit_uint32_t) 0U));

  return min + munit_rand_at_most(0, (munit_uint32_t) range);
}

/* Uniform double in [0, 1), derived from one 32-bit draw. */
double
munit_rand_double(void) {
  munit_uint32_t old, state;
  double retval = 0.0;

  do {
    state = old = munit_atomic_load(&munit_rand_state);

    /* See http://mumble.net/~campbell/tmp/random_real.c for how to do
     * this right.  Patches welcome if you feel that this is too
     * biased.
*/ retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } /*** Test suite handling ***/ typedef struct { unsigned int successful; unsigned int skipped; unsigned int failed; unsigned int errored; #if defined(MUNIT_ENABLE_TIMING) double cpu_clock; double wall_clock; #endif } MunitReport; typedef struct { const char* prefix; const MunitSuite* suite; const char** tests; munit_uint32_t seed; unsigned int iterations; MunitParameter* parameters; bool single_parameter_mode; void* user_data; MunitReport report; bool colorize; bool fork; bool show_stderr; bool fatal_failures; } MunitTestRunner; const char* munit_parameters_get(const MunitParameter params[], const char* key) { const MunitParameter* param; for (param = params ; param != NULL && param->name != NULL ; param++) if (strcmp(param->name, key) == 0) return param->value; return NULL; } static void munit_print_time(FILE* fp, double seconds) { fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, seconds); } /* Add a paramter to an array of parameters. */ static MunitResult munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) { *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2)); if (*params == NULL) return MUNIT_ERROR; (*params)[*params_size].name = name; (*params)[*params_size].value = value; (*params_size)++; (*params)[*params_size].name = NULL; (*params)[*params_size].value = NULL; return MUNIT_OK; } /* Concatenate two strings, but just return one of the components * unaltered if the other is NULL or "". */ static char* munit_maybe_concat(size_t* len, char* prefix, char* suffix) { char* res; size_t res_l; const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0; const size_t suffix_l = suffix != NULL ? 
strlen(suffix) : 0; if (prefix_l == 0 && suffix_l == 0) { res = NULL; res_l = 0; } else if (prefix_l == 0 && suffix_l != 0) { res = suffix; res_l = suffix_l; } else if (prefix_l != 0 && suffix_l == 0) { res = prefix; res_l = prefix_l; } else { res_l = prefix_l + suffix_l; res = malloc(res_l + 1); memcpy(res, prefix, prefix_l); memcpy(res + prefix_l, suffix, suffix_l); res[res_l] = 0; } if (len != NULL) *len = res_l; return res; } /* Possbily free a string returned by munit_maybe_concat. */ static void munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) { if (prefix != s && suffix != s) free(s); } /* Cheap string hash function, just used to salt the PRNG. */ static munit_uint32_t munit_str_hash(const char* name) { const char *p; munit_uint32_t h = 5381U; for (p = name; *p != '\0'; p++) h = (h << 5) + h + *p; return h; } static void munit_splice(int from, int to) { munit_uint8_t buf[1024]; #if !defined(_WIN32) ssize_t len; ssize_t bytes_written; ssize_t write_res; #else int len; int bytes_written; int write_res; #endif do { len = read(from, buf, sizeof(buf)); if (len > 0) { bytes_written = 0; do { write_res = write(to, buf + bytes_written, len - bytes_written); if (write_res < 0) break; bytes_written += write_res; } while (bytes_written < len); } else break; } while (true); } /* This is the part that should be handled in the child process */ static MunitResult munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) { unsigned int iterations = runner->iterations; MunitResult result = MUNIT_FAIL; #if defined(MUNIT_ENABLE_TIMING) MunitWallClock wall_clock_begin, wall_clock_end; MunitCpuClock cpu_clock_begin, cpu_clock_end; #endif unsigned int i = 0; if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION) iterations = 1; else if (iterations == 0) iterations = runner->suite->iterations; munit_rand_seed(runner->seed); do { void* data = 
(test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data);

#if defined(MUNIT_ENABLE_TIMING)
    munit_wall_clock_get_time(&wall_clock_begin);
    munit_cpu_clock_get_time(&cpu_clock_begin);
#endif

    result = test->test(params, data);

#if defined(MUNIT_ENABLE_TIMING)
    munit_wall_clock_get_time(&wall_clock_end);
    munit_cpu_clock_get_time(&cpu_clock_end);
#endif

    if (test->tear_down != NULL)
      test->tear_down(data);

    if (MUNIT_LIKELY(result == MUNIT_OK)) {
      report->successful++;
#if defined(MUNIT_ENABLE_TIMING)
      report->wall_clock += munit_wall_clock_get_elapsed(&wall_clock_begin, &wall_clock_end);
      report->cpu_clock += munit_cpu_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end);
#endif
    } else {
      /* Any non-OK result stops further iterations (note the break after
       * the switch). */
      switch ((int) result) {
        case MUNIT_SKIP:
          report->skipped++;
          break;
        case MUNIT_FAIL:
          report->failed++;
          break;
        case MUNIT_ERROR:
          report->errored++;
          break;
        default:
          break;
      }
      break;
    }
  } while (++i < iterations);

  return result;
}

/* Per-result status labels; emoticons are an opt-in build flavor. */
#if defined(MUNIT_EMOTICON)
#  define MUNIT_RESULT_STRING_OK ":)"
#  define MUNIT_RESULT_STRING_SKIP ":|"
#  define MUNIT_RESULT_STRING_FAIL ":("
#  define MUNIT_RESULT_STRING_ERROR ":o"
#  define MUNIT_RESULT_STRING_TODO ":/"
#else
#  define MUNIT_RESULT_STRING_OK "OK "
#  define MUNIT_RESULT_STRING_SKIP "SKIP "
#  define MUNIT_RESULT_STRING_FAIL "FAIL "
#  define MUNIT_RESULT_STRING_ERROR "ERROR"
#  define MUNIT_RESULT_STRING_TODO "TODO "
#endif

/* Write `string` to the output, wrapped in an ANSI foreground color escape
 * (color is the '3%c' digit) when colorization is enabled. */
static void
munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) {
  if (runner->colorize)
    fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string);
  else
    fputs(string, MUNIT_OUTPUT_FILE);
}

/* Redirect stderr into `stderr_buf`; returns a dup of the original stderr
 * fd for munit_restore_stderr, or -1 when no buffer was supplied. */
static int
munit_replace_stderr(FILE* stderr_buf) {
  if (stderr_buf != NULL) {
    const int orig_stderr = dup(STDERR_FILENO);

    int errfd = fileno(stderr_buf);
    if (MUNIT_UNLIKELY(errfd == -1)) {
      exit(EXIT_FAILURE);
    }

    dup2(errfd, STDERR_FILENO);

    return orig_stderr;
  }

  return -1;
}

/* Undo munit_replace_stderr, restoring and closing the saved fd. */
static void
munit_restore_stderr(int orig_stderr) {
  if (orig_stderr != -1) {
    dup2(orig_stderr, STDERR_FILENO);
    close(orig_stderr);
  }
}

/* Run a test with the specified parameters.  Forks a child process when
 * enabled (so crashes are contained), reads the child's MunitReport back
 * over a pipe, prints the result line, and replays the test's buffered
 * stderr on failure (or when --show-stderr is set). */
static void
munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) {
  MunitResult result = MUNIT_OK;
  MunitReport report = {
    0, 0, 0, 0,
#if defined(MUNIT_ENABLE_TIMING)
    0.0, 0.0
#endif
  };
  unsigned int output_l;
  bool first;
  const MunitParameter* param;
  FILE* stderr_buf;
#if !defined(_WIN32)
  int pipefd[2];
  pid_t fork_pid;
  int orig_stderr;
  ssize_t bytes_written = 0;
  ssize_t write_res;
  ssize_t bytes_read = 0;
  ssize_t read_res;
  int status = 0;
  pid_t changed_pid;
#endif

  /* Print the "key=value, ..." parameter list, padded out to the fixed
   * test-name column width. */
  if (params != NULL) {
    output_l = 2;
    fputs(" ", MUNIT_OUTPUT_FILE);
    first = true;
    for (param = params ; param != NULL && param->name != NULL ; param++) {
      if (!first) {
        fputs(", ", MUNIT_OUTPUT_FILE);
        output_l += 2;
      } else {
        first = false;
      }

      output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value);
    }
    while (output_l++ < MUNIT_TEST_NAME_LEN) {
      fputc(' ', MUNIT_OUTPUT_FILE);
    }
  }

  fflush(MUNIT_OUTPUT_FILE);

  /* Buffer the test's stderr in a temp file so it can be replayed after
   * the result line. */
  stderr_buf = NULL;
#if !defined(_WIN32) || defined(__MINGW32__)
  stderr_buf = tmpfile();
#else
  tmpfile_s(&stderr_buf);
#endif
  if (stderr_buf == NULL) {
    munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr");
    result = MUNIT_ERROR;
    goto print_result;
  }

#if !defined(_WIN32)
  if (runner->fork) {
    pipefd[0] = -1;
    pipefd[1] = -1;
    if (pipe(pipefd) != 0) {
      munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe");
      result = MUNIT_ERROR;
      goto print_result;
    }

    fork_pid = fork();
    if (fork_pid == 0) {
      /* Child: run the test, then ship the report struct back through the
       * write end of the pipe. */
      close(pipefd[0]);

      orig_stderr = munit_replace_stderr(stderr_buf);
      munit_test_runner_exec(runner, test, params, &report);

      /* Note that we don't restore stderr.  This is so we can buffer
       * things written to stderr later on (such as by
       * asan/tsan/ubsan, valgrind, etc.) */
      close(orig_stderr);

      do {
        write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written);
        if (write_res < 0) {
          if (stderr_buf != NULL) {
            munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe");
          }
          exit(EXIT_FAILURE);
        }
        bytes_written += write_res;
      } while ((size_t) bytes_written < sizeof(report));

      if (stderr_buf != NULL)
        fclose(stderr_buf);
      close(pipefd[1]);

      exit(EXIT_SUCCESS);
    } else if (fork_pid == -1) {
      close(pipefd[0]);
      close(pipefd[1]);
      if (stderr_buf != NULL) {
        munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork");
      }
      report.errored++;
      result = MUNIT_ERROR;
    } else {
      /* Parent: read the child's report, then reap it and diagnose
       * abnormal exits (signals, bad status, short reads). */
      close(pipefd[1]);
      do {
        read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read);
        if (read_res < 1)
          break;
        bytes_read += read_res;
      } while (bytes_read < (ssize_t) sizeof(report));

      changed_pid = waitpid(fork_pid, &status, 0);

      if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) {
        if (bytes_read != sizeof(report)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status));
          report.errored++;
        } else if (WEXITSTATUS(status) != EXIT_SUCCESS) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status));
          report.errored++;
        }
      } else {
        if (WIFSIGNALED(status)) {
#if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status)));
#else
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status));
#endif
        } else if (WIFSTOPPED(status)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status));
        }
        report.errored++;
      }

      close(pipefd[0]);
      /* NOTE(review): the child was already reaped by the waitpid above,
       * so this second waitpid should fail with ECHILD (harmless) — TODO
       * confirm this isn't covering some other case. */
      waitpid(fork_pid, NULL, 0);
    }
  } else
#endif
  {
    /* Non-forking path: run in-process; a failed assertion longjmps back
     * here when thread-local storage is available. */
    const volatile int orig_stderr = munit_replace_stderr(stderr_buf);

#if defined(MUNIT_THREAD_LOCAL)
    if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) {
      result = MUNIT_FAIL;
      report.failed++;
    } else {
      munit_error_jmp_buf_valid = true;
      result = munit_test_runner_exec(runner, test, params, &report);
    }
#else
    result = munit_test_runner_exec(runner, test, params, &report);
#endif

    munit_restore_stderr(orig_stderr);

    /* Here just so that the label is used on Windows and we don't get
     * a warning */
    goto print_result;
  }

 print_result:

  fputs("[ ", MUNIT_OUTPUT_FILE);
  if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) {
    /* TODO tests invert success: failing is expected, succeeding is an
     * error (the TODO marker should have been removed). */
    if (report.failed != 0 || report.errored != 0 || report.skipped != 0) {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3');
      result = MUNIT_OK;
    } else {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
      if (MUNIT_LIKELY(stderr_buf != NULL))
        munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful.");
      runner->report.failed++;
      result = MUNIT_ERROR;
    }
  } else if (report.failed > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1');
    runner->report.failed++;
    result = MUNIT_FAIL;
  } else if (report.errored > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
    runner->report.errored++;
    result = MUNIT_ERROR;
  } else if (report.skipped > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3');
    runner->report.skipped++;
    result = MUNIT_SKIP;
  } else if (report.successful > 1) {
    /* Multiple iterations: print per-iteration averages plus totals. */
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / ((double) report.successful));
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / ((double) report.successful));
    fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", "");
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  } else if (report.successful > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  }
  fputs(" ]\n", MUNIT_OUTPUT_FILE);

  /* Replay the buffered stderr on failure, or always with --show-stderr. */
  if (stderr_buf != NULL) {
    if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) {
      fflush(MUNIT_OUTPUT_FILE);
      rewind(stderr_buf);
      munit_splice(fileno(stderr_buf), STDERR_FILENO);
      fflush(stderr);
    }

    fclose(stderr_buf);
  }
}

/* Recursively expand wildcard (CLI-unset) parameters: for each possible
 * value of *p, fill it in and either run the test (when p is the last
 * wildcard) or recurse on the next one. */
static void
munit_test_runner_run_test_wild(MunitTestRunner* runner,
                                const MunitTest* test,
                                const char* test_name,
                                MunitParameter* params,
                                MunitParameter* p) {
  const MunitParameterEnum* pe;
  char** values;
  MunitParameter* next;

  /* Pointer (not string) comparison: params entries share name pointers
   * with the test's parameter enum. */
  for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
    if (p->name == pe->name)
      break;
  }

  /* NOTE(review): when the name is not found this loop terminates with
   * pe non-NULL but pe->name == NULL, so this guard never fires — TODO
   * confirm whether pe->name should also be checked here. */
  if (pe == NULL)
    return;

  for (values = pe->values ; *values != NULL ; values++) {
    next = p + 1;
    p->value = *values;
    if (next->name == NULL) {
      munit_test_runner_run_test_with_params(runner, test, params);
    } else {
      munit_test_runner_run_test_wild(runner, test, test_name, params, next);
    }
    if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
      break;
  }
}

/* Run a single test, with every combination of parameters
 * requested.
 */
static void
munit_test_runner_run_test(MunitTestRunner* runner,
                           const MunitTest* test,
                           const char* prefix) {
  char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter* params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI.  That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter* wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum* pe;
  const MunitParameter* cli_p;
  bool filled;
  unsigned int possible;
  char** vals;
  size_t first_wild;
  const MunitParameter* wp;
  int pidx;

  munit_rand_seed(runner->seed);

  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);

  if (test->parameters == NULL) {
    /* No parameters.  Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);

    for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = false;
      for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = true;
          break;
        }
      }
      if (filled)
        continue;

      /* Nothing from CLI, is the enum NULL/empty?  We're not a
       * fuzzer… */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;

      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly. */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values ; *vals != NULL ; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation.  Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }

    if (wild_params_l != 0) {
      /* Seed each wildcard slot with the first possible value, then let
       * the recursive expansion iterate through the rest. */
      first_wild = params_l;
      for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) {
        for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }

      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }

  cleanup:
    free(params);
    free(wild_params);
  }

  munit_maybe_free_concat(test_name, prefix, test->name);
}

/* Recurse through the suite and run all the tests.  If a list of
 * tests to run was provided on the command line, run only those
 * tests. */
static void
munit_test_runner_run_suite(MunitTestRunner* runner,
                            const MunitSuite* suite,
                            const char* prefix) {
  size_t pre_l;
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const char** test_name;
  const MunitSuite* child_suite;

  /* Run the tests. */
  for (test = suite->tests ; test != NULL && test->test != NULL ; test++) {
    if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
      for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) {
        /* Match: prefix must match, and the rest of the requested name
         * must be a prefix of this test's name. */
        if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) &&
            strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
          munit_test_runner_run_test(runner, test, pre);
          if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
            goto cleanup;
        }
      }
    } else { /* Run all tests */
      munit_test_runner_run_test(runner, test, pre);
    }
  }

  if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
    goto cleanup;

  /* Run any child suites. */
  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_test_runner_run_suite(runner, child_suite, pre);
  }

 cleanup:

  munit_maybe_free_concat(pre, prefix, suite->prefix);
}

/* Entry point for a configured runner: walk the whole suite tree. */
static void
munit_test_runner_run(MunitTestRunner* runner) {
  munit_test_runner_run_suite(runner, runner->suite, NULL);
}

/* Print CLI usage, the version banner, and any user-supplied argument
 * help (via each argument's write_help callback). */
static void
munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) {
  const MunitArgument* arg;
  (void) argc;

  printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
  puts(" --seed SEED\n"
       " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n"
       " notation with no separators (commas, decimals, spaces, etc.), or\n"
       " hexidecimal prefixed by \"0x\".\n"
       " --iterations N\n"
       " Run each test N times. 0 means the default number.\n"
       " --param name value\n"
       " A parameter key/value pair which will be passed to any test with\n"
       " takes a parameter of that name. If not provided, the test will be\n"
       " run once for each possible parameter value.\n"
       " --list Write a list of all available tests.\n"
       " --list-params\n"
       " Write a list of all available tests and their possible parameters.\n"
       " --single Run each parameterized test in a single configuration instead of\n"
       " every possible combination\n"
       " --log-visible debug|info|warning|error\n"
       " --log-fatal debug|info|warning|error\n"
       " Set the level at which messages of different severities are visible,\n"
       " or cause the test to terminate.\n"
#if !defined(_WIN32)
       " --no-fork Do not execute tests in a child process. If this option is supplied\n"
       " and a test crashes (including by failing an assertion), no further\n"
       " tests will be performed.\n"
#endif
       " --fatal-failures\n"
       " Stop executing tests as soon as a failure is found.\n"
       " --show-stderr\n"
       " Show data written to stderr by the tests, even if the test succeeds.\n"
       " --color auto|always|never\n"
       " Colorize (or don't) the output.\n"
/* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
       " --help Print this help message and exit.\n");
#if defined(MUNIT_NL_LANGINFO)
  /* Print "µnit" only when the locale's codeset is UTF-8. */
  setlocale(LC_ALL, "");
  fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ?
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; bool first; char** val; const MunitSuite* child_suite; for (test = suite->tests ; test != NULL && test->name != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = true; for (val = params->values ; *val != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = false; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return false; #endif 
} int munit_suite_main_custom(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], const MunitArgument arguments[]) { int result = EXIT_FAILURE; MunitTestRunner runner; size_t parameters_size = 0; size_t tests_size = 0; int arg; char* envptr; unsigned long ts; char* endptr; unsigned long long iterations; MunitLogLevel level; const MunitArgument* argument; const char** runner_tests; unsigned int tests_run; unsigned int tests_total; runner.prefix = NULL; runner.suite = NULL; runner.tests = NULL; runner.seed = 0; runner.iterations = 0; runner.parameters = NULL; runner.single_parameter_mode = false; runner.user_data = NULL; runner.report.successful = 0; runner.report.skipped = 0; runner.report.failed = 0; runner.report.errored = 0; #if defined(MUNIT_ENABLE_TIMING) runner.report.cpu_clock = 0.0; runner.report.wall_clock = 0.0; #endif runner.colorize = false; #if !defined(_WIN32) runner.fork = true; #else runner.fork = false; #endif runner.show_stderr = false; runner.fatal_failures = false; runner.suite = suite; runner.user_data = user_data; runner.seed = munit_rand_generate_seed(); runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); for (arg = 1 ; arg < argc ; arg++) { if (strncmp("--", argv[arg], 2) == 0) { if (strcmp("seed", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } envptr = argv[arg + 1]; ts = strtoul(argv[arg + 1], &envptr, 0); if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.seed = (munit_uint32_t) ts; arg++; } else if (strcmp("iterations", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } endptr = argv[arg + 1]; iterations = strtoul(argv[arg + 1], &endptr, 0); if (*endptr 
!= '\0' || iterations > UINT_MAX) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.iterations = (unsigned int) iterations; arg++; } else if (strcmp("param", argv[arg] + 2) == 0) { if (arg + 2 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]); goto cleanup; } runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2)); if (runner.parameters == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.parameters[parameters_size].name = (char*) argv[arg + 1]; runner.parameters[parameters_size].value = (char*) argv[arg + 2]; parameters_size++; runner.parameters[parameters_size].name = NULL; runner.parameters[parameters_size].value = NULL; arg += 2; } else if (strcmp("color", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "always") == 0) runner.colorize = true; else if (strcmp(argv[arg + 1], "never") == 0) runner.colorize = false; else if (strcmp(argv[arg + 1], "auto") == 0) runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } arg++; } else if (strcmp("help", argv[arg] + 2) == 0) { munit_print_help(argc, argv, user_data, arguments); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("single", argv[arg] + 2) == 0) { runner.single_parameter_mode = true; } else if (strcmp("show-stderr", argv[arg] + 2) == 0) { runner.show_stderr = true; #if !defined(_WIN32) } else if (strcmp("no-fork", argv[arg] + 2) == 0) { runner.fork = false; #endif } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) { runner.fatal_failures = true; } else if (strcmp("log-visible", argv[arg] + 2) == 0 || strcmp("log-fatal", 
argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "debug") == 0) level = MUNIT_LOG_DEBUG; else if (strcmp(argv[arg + 1], "info") == 0) level = MUNIT_LOG_INFO; else if (strcmp(argv[arg + 1], "warning") == 0) level = MUNIT_LOG_WARNING; else if (strcmp(argv[arg + 1], "error") == 0) level = MUNIT_LOG_ERROR; else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } if (strcmp("log-visible", argv[arg] + 2) == 0) munit_log_level_visible = level; else munit_log_level_fatal = level; arg++; } else if (strcmp("list", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, false, NULL); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("list-params", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, true, NULL); result = EXIT_SUCCESS; goto cleanup; } else { argument = munit_arguments_find(arguments, argv[arg] + 2); if (argument == NULL) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]); goto cleanup; } if (!argument->parse_argument(suite, user_data, &arg, argc, argv)) goto cleanup; } } else { runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2)); if (runner_tests == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.tests = runner_tests; runner.tests[tests_size++] = argv[arg]; runner.tests[tests_size] = NULL; } } fflush(stderr); fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed); munit_test_runner_run(&runner); tests_run = runner.report.successful + runner.report.failed + runner.report.errored; tests_total = tests_run + runner.report.skipped; if (tests_run == 0) { fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped); } else { fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) 
test skipped.\n", runner.report.successful, tests_run, (((double) runner.report.successful) / ((double) tests_run)) * 100.0, runner.report.skipped, (((double) runner.report.skipped) / ((double) tests_total)) * 100.0); } if (runner.report.failed == 0 && runner.report.errored == 0) { result = EXIT_SUCCESS; } cleanup: free(runner.parameters); free((void*) runner.tests); return result; } int munit_suite_main(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) { return munit_suite_main_custom(suite, user_data, argc, argv, NULL); }
convolutiondepthwise_3x3_int8.h
// SenseNets is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w = bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w 
= bottom_blob.w; //int h = bottom_blob.h; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const signed char *kernel = _kernel; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char *kernel0 = (const signed char *)kernel + p * 9; int *outptr = out; const signed char *img0 = bottom_blob.channel(p); const signed char *r0 = img0; const signed char *r1 = img0 + w; const signed char *r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * (int)kernel0[0]; sum += (int)r0[1] * (int)kernel0[1]; sum += (int)r0[2] * (int)kernel0[2]; sum += (int)r1[0] * (int)kernel0[3]; sum += (int)r1[1] * (int)kernel0[4]; sum += (int)r1[2] * (int)kernel0[5]; sum += (int)r2[0] * (int)kernel0[6]; sum += (int)r2[1] * (int)kernel0[7]; sum += (int)r2[2] * (int)kernel0[8]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
GB_binop__pair_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_int64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_int64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int64_t // A type: int64_t // A pattern? 1 // B type: int64_t // B pattern? 
1 // BinaryOp: cij = 1 #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_INT64 || GxB_NO_PAIR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pair_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict 
Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
grid.c
#include <mpi.h> int *cn_c; int *ce_c; int *ec_c; int *cn_crem; int *ce_crem; int *ec_crem; int *neighbor_map; int *cedge_map; int *ecell_map; int *neighbor_maprem; int *cedge_maprem; int *ecell_maprem; GVAL **neighbor_2Dbuf; GVAL **neighbor_3Dbuf; GVAL **cedge_2Dbuf; GVAL **cedge_3Dbuf; GVAL **ecell_2Dbuf; GVAL **ecell_3Dbuf; GVAL **neighbor_2Dbufrem; GVAL **neighbor_3Dbufrem; GVAL **cedge_2Dbufrem; GVAL **cedge_3Dbufrem; GVAL **ecell_2Dbufrem; GVAL **ecell_3Dbufrem; MPI_Request *mpi_send_requests; MPI_Request *mpi_recv_requests; int comm_tag; int local_cell_blocks; int local_edge_blocks; #include "grid.h" #include "memory.h" int transform(int n, int x, int y) { int rx, ry, s, d = 0, t; for (s = n / 2; s > 0; s /= 2) { rx = (x & s) > 0; ry = (y & s) > 0; d += s * s * ((3 * rx) ^ ry); if (ry == 0) { if (rx) { x = n - 1 - x; y = n - 1 - y; } t = x; x = y; y = t; } } return d; } void create_maps(GRID * g) { int x = g->height, y = g->height; g->map1 = malloc(x * sizeof(int *)); g->map2 = malloc(g->cellCount * sizeof(map_t)); for (int i = 0; i < x; i++) { g->map1[i] = malloc(y * sizeof(int)); for (int j = 0; j < y; j++) { int t = transform(x, i, j); g->map1[i][j] = t; g->map2[t].i = i; g->map2[t].j = j; } } } #ifndef NBRS #error "please define NBRS" #endif #if NBRS==3 int calc_edge_count(GRID * g) { return (g->cellCount * 3) / 2; } void tessellation(GRID * g) { for (int i = 0; i < NBRS; i++) { g->neighbor[i] = malloc((g->cellCount) * sizeof(int)); g->cedge[i] = malloc((g->cellCount) * sizeof(int)); } for (int i = 0; i < 2; i++) { g->ecell[i] = malloc((g->edgeCount) * sizeof(int)); } int x = g->height, y = g->height; for (int i = 0; i < x - 1; i++) for (int j = 0; j < y; j++) g->neighbor[0][g->map1[i][j]] = g->map1[i + 1][j]; for (int j = 0; j < y; j++) g->neighbor[0][g->map1[x - 1][j]] = g->map1[0][j]; for (int i = 1; i < x; i++) for (int j = 0; j < y; j++) g->neighbor[1][g->map1[i][j]] = g->map1[i - 1][j]; for (int j = 0; j < y; j++) g->neighbor[1][g->map1[0][j]] = 
g->map1[x - 1][j]; for (int i = 0; i < x; i += 2) g->neighbor[2][g->map1[i][0]] = g->map1[i][y - 1]; for (int i = 0; i < x; i += 2) for (int j = 2; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 1; i < x; i += 2) for (int j = 1; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 1; i < x; i += 2) for (int j = 0; j < y - 1; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j + 1]; for (int i = 0; i < x; i += 2) for (int j = 1; j < y - 1; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j + 1]; for (int i = y % 2; i < x; i += 2) g->neighbor[2][g->map1[i][y - 1]] = g->map1[i][0]; for (int c = 0; c < g->cellCount; c++) { g->cedge[0][c] = (c * 3) / 2; g->cedge[1][g->neighbor[0][c]] = g->cedge[0][c]; g->ecell[0][g->cedge[0][c]] = g->neighbor[0][c]; g->ecell[1][g->cedge[0][c]] = c; } for (int c = 0; c < g->cellCount; c += 2) { g->cedge[2][c] = (c * 3) / 2 + 2; g->cedge[2][g->neighbor[2][c]] = g->cedge[2][c]; g->ecell[0][g->cedge[2][c]] = c; g->ecell[1][g->cedge[2][c]] = g->neighbor[2][c]; } } void init_edge_weights(GRID * g) { GVAL j = -1.0; for (int i = 0; i < BLKSIZE; i++) { g->edge_weights[0][i] = 1.0; g->edge_weights[1][i] = -1.0; g->edge_weights[2][i] = j; j = j * -1; } } #elif NBRS==4 int calc_edge_count(GRID * g) { return (int) (((float) g->cellCount * NBRS / 2.0) + (2.0 * g->height)); } void tessellation(GRID * g) { for (int i = 0; i < NBRS; i++) { g->neighbor[i] = malloc((g->cellCount) * sizeof(int)); g->cedge[i] = malloc((g->cellCount) * sizeof(int)); } for (int i = 0; i < 2; i++) { g->ecell[i] = malloc((g->edgeCount) * sizeof(int)); } int x = g->height, y = g->height; for (int i = 0; i < x; i++) { // for (int j = 0; j < y; j++) { if (i < x - 1) g->neighbor[0][g->map1[i][j]] = g->map1[i + 1][j]; else g->neighbor[0][g->map1[i][j]] = g->cellCount; if (j < y - 1) g->neighbor[1][g->map1[i][j]] = g->map1[i][j + 1]; else g->neighbor[1][g->map1[i][j]] = g->cellCount; if (i > 0) g->neighbor[2][g->map1[i][j]] 
= g->map1[i - 1][j]; else g->neighbor[2][g->map1[i][j]] = g->cellCount; if (j > 0) g->neighbor[3][g->map1[i][j]] = g->map1[i][j - 1]; else g->neighbor[3][g->map1[i][j]] = g->cellCount; g->neighbor[0][g->cellCount] = g->neighbor[1][g->cellCount] = g->neighbor[2][g->cellCount] = g->neighbor[3][g->cellCount] = g->cellCount; // g->cedge[0][g->map1[i][j]] = g->map1[i][j]; g->cedge[1][g->map1[i][j]] = g->cellCount + g->map1[i][j]; if (i > 0) g->cedge[2][g->map1[i][j]] = g->map1[i - 1][j]; else g->cedge[2][g->map1[i][j]] = g->cellCount * 2 + j; if (j > 0) g->cedge[3][g->map1[i][j]] = g->cellCount + g->map1[i][j - 1]; else g->cedge[3][g->map1[i][j]] = g->cellCount * 2 + y + i; g->ecell[1][g->map1[i][j]] = g->map1[i][j]; g->ecell[1][g->cellCount + g->map1[i][j]] = g->map1[i][j]; if (i > 0) g->ecell[0][g->map1[i - 1][j]] = g->map1[i][j]; else g->ecell[0][g->cellCount * 2 + j] = g->map1[i][j]; if (j > 0) g->ecell[0][g->cellCount + g->map1[i][j - 1]] = g->map1[i][j]; else g->ecell[0][g->cellCount * 2 + y + i] = g->map1[i][j]; g->ecell[0][g->map1[x - 1][j]] = g->cellCount; //TODO: out of loop g->ecell[0][g->cellCount + g->map1[i][y - 1]] = g->cellCount; // g->ecell[1][g->cellCount * 2 + j] = g->cellCount; //TODO: out of loop g->ecell[1][g->cellCount * 2 + y + i] = g->cellCount; } } } // void init_edge_weights(GRID * g) { for (int i = 0; i < BLKSIZE; i++) { g->edge_weights[0][i] = 1.0; g->edge_weights[1][i] = 1.0; g->edge_weights[2][i] = -1.0; g->edge_weights[3][i] = -1.0; } } #elif NBRS==6 int calc_edge_count(GRID * g) { return (int) (((float) g->cellCount * NBRS / 2.0) + (4.0 * g->height) - 1); } void tessellation(GRID * g) { for (int i = 0; i < NBRS; i++) { g->neighbor[i] = malloc((g->cellCount) * sizeof(int)); g->cedge[i] = malloc((g->cellCount) * sizeof(int)); } for (int i = 0; i < 2; i++) { g->ecell[i] = malloc((g->edgeCount) * sizeof(int)); } int x = g->height, y = g->height; for (int i = 0; i < x; i++) for (int j = 0; j < y; j += 2) g->neighbor[0][g->map1[i][j]] = 
g->map1[i][j + 1]; for (int i = 0; i < x - 1; i++) for (int j = 1; j < y - 1; j += 2) g->neighbor[0][g->map1[i][j]] = g->map1[i + 1][j + 1]; for (int j = 1; j < y - 1; j += 2) g->neighbor[0][g->map1[x - 1][j]] = g->cellCount; for (int i = 0; i < x; i++) g->neighbor[0][g->map1[i][y - 1]] = g->cellCount; for (int i = 0; i < x - 1; i++) for (int j = 0; j < y; j++) g->neighbor[1][g->map1[i][j]] = g->map1[i + 1][j]; for (int j = 0; j < y; j++) g->neighbor[1][g->map1[x - 1][j]] = g->cellCount; for (int i = 0; i < x; i++) g->neighbor[2][g->map1[i][0]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 2; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 0; i < x - 1; i++) for (int j = 1; j < y; j += 2) g->neighbor[2][g->map1[i][j]] = g->map1[i + 1][j - 1]; for (int j = 1; j < y; j += 2) g->neighbor[2][g->map1[x - 1][j]] = g->cellCount; for (int i = 0; i < x; i++) g->neighbor[3][g->map1[i][0]] = g->cellCount; for (int i = 1; i < x; i++) for (int j = 2; j < y; j += 2) g->neighbor[3][g->map1[i][j]] = g->map1[i - 1][j - 1]; for (int j = 2; j < y; j += 2) g->neighbor[3][g->map1[0][j]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 1; j < y; j += 2) g->neighbor[3][g->map1[i][j]] = g->map1[i][j - 1]; for (int i = 1; i < x; i++) for (int j = 0; j < y; j++) g->neighbor[4][g->map1[i][j]] = g->map1[i - 1][j]; for (int j = 0; j < y; j++) g->neighbor[4][g->map1[0][j]] = g->cellCount; for (int i = 1; i < x; i++) for (int j = 0; j < y; j += 2) g->neighbor[5][g->map1[i][j]] = g->map1[i - 1][j + 1]; for (int j = 0; j < y; j += 2) g->neighbor[5][g->map1[0][j]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 1; j < y - 1; j += 2) g->neighbor[5][g->map1[i][j]] = g->map1[i][j + 1]; for (int i = 0; i < x; i++) g->neighbor[5][g->map1[i][y - 1]] = g->cellCount; for (int i = 0; i < x; i++) for (int j = 0; j < y; j++) { g->cedge[0][g->map1[i][j]] = g->map1[i][j]; g->cedge[1][g->map1[i][j]] = g->cellCount + g->map1[i][j]; 
g->cedge[2][g->map1[i][j]] = g->cellCount * 2 + g->map1[i][j]; } for (int i = 0; i < x; i++) for (int j = 0; j < y; j++) { if (j == 0 || ((i == 0) && (j % 2 == 0))) g->cedge[3][g->map1[i][j]] = g->cellCount * 3 + (j == 0 ? i : x + j / 2 - 1); else g->cedge[3][g->map1[i][j]] = g->cedge[0][g->neighbor[3][g->map1[i][j]]]; if (i == 0) g->cedge[4][g->map1[i][j]] = g->cellCount * 3 + x + x / 2 + j - 1; else g->cedge[4][g->map1[i][j]] = g->cedge[1][g->neighbor[4][g->map1[i][j]]]; if ((j == y - 1) || ((i == 0) && (j % 2 == 0))) g->cedge[5][g->map1[i][j]] = g->cellCount * 3 + x + x / 2 + y - 1 + (j == y - 1 ? y / 2 + i : j / 2); else g->cedge[5][g->map1[i][j]] = g->cedge[2][g->neighbor[5][g->map1[i][j]]]; } for (int c = 0; c < g->cellCount; c++) { g->ecell[0][g->cedge[0][c]] = g->neighbor[0][c]; g->ecell[1][g->cedge[0][c]] = c; g->ecell[0][g->cedge[1][c]] = g->neighbor[1][c]; g->ecell[1][g->cedge[1][c]] = c; g->ecell[0][g->cedge[2][c]] = g->neighbor[2][c]; g->ecell[1][g->cedge[2][c]] = c; g->ecell[0][g->cedge[3][c]] = c; g->ecell[1][g->cedge[3][c]] = g->neighbor[3][c]; g->ecell[0][g->cedge[4][c]] = c; g->ecell[1][g->cedge[4][c]] = g->neighbor[4][c]; g->ecell[0][g->cedge[5][c]] = c; g->ecell[1][g->cedge[5][c]] = g->neighbor[5][c]; } } void init_edge_weights(GRID * g) { for (int i = 0; i < BLKSIZE; i++) { g->edge_weights[0][i] = g->edge_weights[1][i] = g->edge_weights[2][i] = 1.0; g->edge_weights[3][i] = g->edge_weights[4][i] = g->edge_weights[5][i] = -1.0; } } #else #error "supported shapes are traiangles,rectangles and hexagons" #endif void init_blocking(GRID * g) { for (int i = 0; i < NBRS; i++) { { int num_blocks = local_cell_blocks ? 
local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cNeighborIdx[i] = malloc(24); g->cNeighborIdx[i]->name = "g->cNeighborIdx[ i]"; g->cNeighborIdx[i]->loc = 0; g->cNeighborIdx[i]->dim = 2; g->cNeighborIdx[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cNeighborIdx[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cNeighborIdx[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cNeighborIdx[i]->data_pointer.p2[b][c] = (int) 0; } } } { int num_blocks = local_cell_blocks ? local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cNeighborBlk[i] = malloc(24); g->cNeighborBlk[i]->name = "g->cNeighborBlk[ i]"; g->cNeighborBlk[i]->loc = 0; g->cNeighborBlk[i]->dim = 2; g->cNeighborBlk[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cNeighborBlk[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cNeighborBlk[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cNeighborBlk[i]->data_pointer.p2[b][c] = (int) 0; } } } } int first_cBlock = g->mpi_rank * ((g->cBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size); int first_eBlock = g->mpi_rank * ((g->eBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size); for (int i = 0; i < NBRS; i++) { { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { if ((g->mpi_rank == g->mpi_world_size - 1) && (block_index == (g->cBlkCnt - 1) % ((g->cBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size)) && (cell_index > (g->cellCount - 1) % g->blkSize)) { g->cNeighborIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = cell_index; g->cNeighborBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = block_index; } else { g->cNeighborIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = g->neighbor[i][(first_cBlock + block_index) * g->blkSize + cell_index] % g->blkSize; g->cNeighborBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = g->neighbor[i][(first_cBlock + block_index) * g->blkSize + cell_index] / g->blkSize; } } } } } for (int i = 0; i < NBRS; i++) { { int num_blocks = local_cell_blocks ? local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cEdgeIdx[i] = malloc(24); g->cEdgeIdx[i]->name = "g->cEdgeIdx[ i]"; g->cEdgeIdx[i]->loc = 0; g->cEdgeIdx[i]->dim = 2; g->cEdgeIdx[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cEdgeIdx[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cEdgeIdx[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cEdgeIdx[i]->data_pointer.p2[b][c] = (int) 0; } } } { int num_blocks = local_cell_blocks ? 
local_cell_blocks : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->cEdgeBlk[i] = malloc(24); g->cEdgeBlk[i]->name = "g->cEdgeBlk[ i]"; g->cEdgeBlk[i]->loc = 0; g->cEdgeBlk[i]->dim = 2; g->cEdgeBlk[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->cEdgeBlk[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->cEdgeBlk[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int c = 0; c < g->blkSize; c++) { g->cEdgeBlk[i]->data_pointer.p2[b][c] = (int) 0; } } } } for (int i = 0; i < NBRS; i++) { { size_t min_block = g->mpi_rank == (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t cell_index = (0); cell_index < (g->blkSize); cell_index++) { if ((g->mpi_rank == g->mpi_world_size - 1) && (block_index == (g->cBlkCnt - 1) % ((g->cBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size)) && (cell_index > (g->cellCount - 1) % g->blkSize)) { g->cEdgeIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = 0; g->cEdgeBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = first_eBlock; } else { g->cEdgeIdx[i]->data_pointer.p2[(block_index)][(cell_index)] = g->cedge[i][(first_cBlock + block_index) * g->blkSize + cell_index] % g->blkSize; g->cEdgeBlk[i]->data_pointer.p2[(block_index)][(cell_index)] = g->cedge[i][(first_cBlock + block_index) * g->blkSize + cell_index] / g->blkSize; } } } } } for (int i = 0; i < 2; i++) { { int num_blocks = local_edge_blocks ? local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->eCellIdx[i] = malloc(24); g->eCellIdx[i]->name = "g->eCellIdx[ i]"; g->eCellIdx[i]->loc = 1; g->eCellIdx[i]->dim = 2; g->eCellIdx[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->eCellIdx[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->eCellIdx[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int e = 0; e < g->blkSize; e++) { g->eCellIdx[i]->data_pointer.p2[b][e] = (int) 0; } } } { int num_blocks = local_edge_blocks ? 
local_edge_blocks : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); g->eCellBlk[i] = malloc(24); g->eCellBlk[i]->name = "g->eCellBlk[ i]"; g->eCellBlk[i]->loc = 1; g->eCellBlk[i]->dim = 2; g->eCellBlk[i]->data_pointer.p2 = malloc((num_blocks * g->blkSize) * sizeof(int) + (num_blocks) * sizeof(char *)); char *pos = (char *) g->eCellBlk[i]->data_pointer.p2 + num_blocks * sizeof(char *); for (int b = 0; b < num_blocks; b++) { g->eCellBlk[i]->data_pointer.p2[b] = (int *) pos; pos += g->blkSize * sizeof(int); for (int e = 0; e < g->blkSize; e++) { g->eCellBlk[i]->data_pointer.p2[b][e] = (int) 0; } } } } for (int i = 0; i < 2; i++) { { size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0; size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); #pragma omp parallel for for (size_t block_index = (min_block); block_index < (max_block); block_index++) { for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) { if ((g->mpi_rank == g->mpi_world_size - 1) && (block_index == (g->eBlkCnt - 1) % ((g->eBlkCnt + g->mpi_world_size - 1) / g->mpi_world_size)) && (edge_index > (g->edgeCount - 1) % g->blkSize)) { g->eCellIdx[i]->data_pointer.p2[(block_index)][(edge_index)] = 0; g->eCellBlk[i]->data_pointer.p2[(block_index)][(edge_index)] = first_cBlock; } else { g->eCellIdx[i]->data_pointer.p2[(block_index)][(edge_index)] = g->ecell[i][(first_eBlock + block_index) * g->blkSize + edge_index] % g->blkSize; g->eCellBlk[i]->data_pointer.p2[(block_index)][(edge_index)] = g->ecell[i][(first_eBlock + block_index) * g->blkSize + edge_index] / g->blkSize; } } } } } } void init_grid(GRID * g, int cellCount, int height) { g->cellCount = cellCount * cellCount; //cellCount; g->height = cellCount; g->edgeCount = calc_edge_count(g); g->blkSize = BLKSIZE; g->cBlkCnt = (g->cellCount + g->blkSize - 1) / g->blkSize; g->eBlkCnt = (g->edgeCount + g->blkSize - 1) / g->blkSize; create_maps(g); tessellation(g); g->height = height; init_edge_weights(g); init_blocking(g); { int cell_min = 0; int cell_max = g->mpi_rank > (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->cBlkCnt - 1) / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 
g->cBlkCnt % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); int edge_min = 0; int edge_max = g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); int *cn_H = malloc(g->mpi_world_size * sizeof(int) * 2); cn_c = malloc(g->mpi_world_size * sizeof(int)); for (int i = 0; i < g->mpi_world_size; i++) cn_c[i] = 0; for (int b = cell_min; b < cell_max; b++) { for (int c = (0); c < (g->blkSize); c++) { for (int n = (0); n < 3; n++) { if (g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->cBlkCnt) continue; if (g->cNeighborBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_max) { cn_c[g->cNeighborBlk[n]->data_pointer.p2[b][c] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)]++; } } } } cn_H[0] = cn_c[0] + cell_max * g->blkSize; for (int i = 1; i < g->mpi_world_size; i++) { cn_H[2 * i] = cn_c[i] + cn_H[2 * i - 2]; } int ml = 0; for (int i = 0; i < g->mpi_world_size; i++) { ml += cn_c[i]; } neighbor_map = malloc(ml * sizeof(int) * 5); for (int i = 0; i < g->mpi_world_size; i++) { cn_H[2 * i + 1] = cn_H[2 * i] % g->blkSize; cn_H[2 * i] = cn_H[2 * i] / g->blkSize; } int *tp = malloc(g->mpi_world_size * sizeof(int) * 2); tp[0] = cell_max; tp[1] = 0; for (int i = 1; i < g->mpi_world_size; i++) { tp[i * 2] = cn_H[i * 2 - 2]; 
tp[i * 2 + 1] = cn_H[i * 2 - 1]; } int *mi = malloc(g->mpi_world_size * sizeof(int)); mi[0] = 0; for (int i = 1; i < g->mpi_world_size; i++) mi[i] = 5 * cn_c[i - 1] + mi[i - 1]; for (int b = cell_min; b < cell_max; b++) { for (int c = (0); c < (g->blkSize); c++) { for (int n = (0); n < 3; n++) { if (g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->cBlkCnt || g->cNeighborBlk[n]->data_pointer.p2[b][c] < 0) { g->cNeighborBlk[n]->data_pointer.p2[b][c] = -1; } else if (g->cNeighborBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->cNeighborBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_max) { int pn = g->cNeighborBlk[n]->data_pointer.p2[b][c] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); neighbor_map[mi[pn]++] = pn; neighbor_map[mi[pn]++] = g->cNeighborBlk[n]->data_pointer.p2[b][c] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); neighbor_map[mi[pn]++] = g->cNeighborIdx[n]->data_pointer.p2[b][c]; neighbor_map[mi[pn]++] = tp[pn * 2]; neighbor_map[mi[pn]++] = tp[pn * 2 + 1]; g->cNeighborBlk[n]->data_pointer.p2[b][c] = tp[pn * 2]; g->cNeighborIdx[n]->data_pointer.p2[b][c] = tp[pn * 2 + 1]; if (++tp[pn * 2 + 1] == g->blkSize) { tp[pn * 2]++; tp[pn * 2 + 1] = 0; } } else { g->cNeighborBlk[n]->data_pointer.p2[b][c] = g->cNeighborBlk[n]->data_pointer.p2[b][c] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); } } } } int *ce_H = malloc(g->mpi_world_size * sizeof(int) * 2); ce_c = malloc(g->mpi_world_size * sizeof(int)); for (int i = 0; i < g->mpi_world_size; i++) ce_c[i] = 0; for (int b = cell_min; b < cell_max; b++) { for (int c = (0); c < (g->blkSize); c++) { for (int n = (0); n < 3; n++) { if (g->cEdgeBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_min || g->cEdgeBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * 
((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_max) { ce_c[g->cEdgeBlk[n]->data_pointer.p2[b][c] / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)]++; } } } } ce_H[0] = ce_c[0] + edge_max * g->blkSize; for (int i = 1; i < g->mpi_world_size; i++) { ce_H[2 * i] = ce_c[i] + ce_H[2 * i - 2]; } ml = 0; for (int i = 0; i < g->mpi_world_size; i++) { ml += ce_c[i]; } cedge_map = malloc(ml * sizeof(int) * 5); for (int i = 0; i < g->mpi_world_size; i++) { ce_H[2 * i + 1] = ce_H[2 * i] % g->blkSize; ce_H[2 * i] = ce_H[2 * i] / g->blkSize; } local_edge_blocks = ce_H[g->mpi_world_size * 2 - 2] + 1; tp[0] = edge_max; tp[1] = 0; for (int i = 1; i < g->mpi_world_size; i++) { tp[i * 2] = ce_H[i * 2 - 2]; tp[i * 2 + 1] = ce_H[i * 2 - 1]; } mi[0] = 0; for (int i = 1; i < g->mpi_world_size; i++) mi[i] = 5 * ce_c[i - 1] + mi[i - 1]; for (int b = cell_min; b < cell_max; b++) { for (int c = (0); c < (g->blkSize); c++) { for (int n = (0); n < 3; n++) { if (g->cEdgeBlk[n]->data_pointer.p2[b][c] < g->mpi_rank * ((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_min || g->cEdgeBlk[n]->data_pointer.p2[b][c] >= g->mpi_rank * ((((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + edge_max) { int pn = g->cEdgeBlk[n]->data_pointer.p2[b][c] / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); cedge_map[mi[pn]++] = pn; cedge_map[mi[pn]++] = g->cEdgeBlk[n]->data_pointer.p2[b][c] % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); cedge_map[mi[pn]++] = g->cEdgeIdx[n]->data_pointer.p2[b][c]; cedge_map[mi[pn]++] = tp[pn * 2]; cedge_map[mi[pn]++] = tp[pn * 2 + 1]; g->cEdgeBlk[n]->data_pointer.p2[b][c] = tp[pn * 2]; g->cEdgeIdx[n]->data_pointer.p2[b][c] = tp[pn * 2 + 1]; if (++tp[pn * 2 + 1] == g->blkSize) { tp[pn * 2]++; tp[pn * 2 + 1] = 0; } } else { g->cEdgeBlk[n]->data_pointer.p2[b][c] = g->cEdgeBlk[n]->data_pointer.p2[b][c] % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); } } } } int *ec_H = 
malloc(g->mpi_world_size * sizeof(int) * 2); ec_c = malloc(g->mpi_world_size * sizeof(int)); for (int i = 0; i < g->mpi_world_size; i++) ec_c[i] = 0; for (int b = edge_min; b < edge_max; b++) { for (int e = (0); e < (g->blkSize); e++) { for (int n = (0); n < 2; n++) { if (g->eCellBlk[n]->data_pointer.p2[b][e] >= g->cBlkCnt) continue; if (g->eCellBlk[n]->data_pointer.p2[b][e] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->eCellBlk[n]->data_pointer.p2[b][e] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_max) { ec_c[g->eCellBlk[n]->data_pointer.p2[b][e] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)]++; } } } } ec_H[0] = ec_c[0] + cn_H[g->mpi_world_size * 2 - 2] * g->blkSize + cn_H[g->mpi_world_size * 2 - 1]; for (int i = 1; i < g->mpi_world_size; i++) { ec_H[2 * i] = ec_c[i] + ec_H[2 * i - 2]; } ml = 0; for (int i = 0; i < g->mpi_world_size; i++) { ml += ec_c[i]; } ecell_map = malloc(ml * sizeof(int) * 5); for (int i = 0; i < g->mpi_world_size; i++) { ec_H[2 * i + 1] = ec_H[2 * i] % g->blkSize; ec_H[2 * i] = ec_H[2 * i] / g->blkSize; } local_cell_blocks = ec_H[g->mpi_world_size * 2 - 2] + 1; tp[0] = cn_H[g->mpi_world_size * 2 - 2]; tp[1] = cn_H[g->mpi_world_size * 2 - 1]; for (int i = 1; i < g->mpi_world_size; i++) { tp[i * 2] = ec_H[i * 2 - 2]; tp[i * 2 + 1] = ec_H[i * 2 - 1]; } mi[0] = 0; for (int i = 1; i < g->mpi_world_size; i++) mi[i] = 5 * ec_c[i - 1] + mi[i - 1]; for (int b = edge_min; b < edge_max; b++) { for (int e = (0); e < (g->blkSize); e++) { for (int n = (0); n < 2; n++) { if (g->eCellBlk[n]->data_pointer.p2[b][e] >= g->cBlkCnt || g->eCellBlk[n]->data_pointer.p2[b][e] < 0) { g->eCellBlk[n]->data_pointer.p2[b][e] = -1; } else if (g->eCellBlk[n]->data_pointer.p2[b][e] < g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size)) + cell_min || g->eCellBlk[n]->data_pointer.p2[b][e] >= g->mpi_rank * ((((g->cBlkCnt) + g->mpi_world_size - 
1) / g->mpi_world_size)) + cell_max) { int pn = g->eCellBlk[n]->data_pointer.p2[b][e] / (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); ecell_map[mi[pn]++] = pn; ecell_map[mi[pn]++] = g->eCellBlk[n]->data_pointer.p2[b][e] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); ecell_map[mi[pn]++] = g->eCellIdx[n]->data_pointer.p2[b][e]; ecell_map[mi[pn]++] = tp[pn * 2]; ecell_map[mi[pn]++] = tp[pn * 2 + 1]; g->eCellBlk[n]->data_pointer.p2[b][e] = tp[pn * 2]; g->eCellIdx[n]->data_pointer.p2[b][e] = tp[pn * 2 + 1]; if (++tp[pn * 2 + 1] == g->blkSize) { tp[pn * 2]++; tp[pn * 2 + 1] = 0; } } else { g->eCellBlk[n]->data_pointer.p2[b][e] = g->eCellBlk[n]->data_pointer.p2[b][e] % (((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size); } } } } free(tp); mpi_send_requests = malloc(g->mpi_world_size * 2 * sizeof(MPI_Request)); mpi_recv_requests = &mpi_send_requests[g->mpi_world_size]; mpi_send_requests[g->mpi_rank] = MPI_REQUEST_NULL; mpi_recv_requests[g->mpi_rank] = MPI_REQUEST_NULL; comm_tag = 10; cn_crem = malloc(g->mpi_world_size * sizeof(int)); ce_crem = malloc(g->mpi_world_size * sizeof(int)); ec_crem = malloc(g->mpi_world_size * sizeof(int)); for (int pn = 0; pn < g->mpi_world_size; pn++) { if (pn != g->mpi_rank) { MPI_Isend(&cn_c[pn], 1, MPI_INT, pn, 0, MPI_COMM_WORLD, &mpi_send_requests[pn]); MPI_Irecv(&cn_crem[pn], 1, MPI_INT, pn, 0, MPI_COMM_WORLD, &mpi_recv_requests[pn]); MPI_Isend(&ce_c[pn], 1, MPI_INT, pn, 1, MPI_COMM_WORLD, &mpi_send_requests[pn]); MPI_Irecv(&ce_crem[pn], 1, MPI_INT, pn, 1, MPI_COMM_WORLD, &mpi_recv_requests[pn]); MPI_Isend(&ec_c[pn], 1, MPI_INT, pn, 2, MPI_COMM_WORLD, &mpi_send_requests[pn]); MPI_Irecv(&ec_crem[pn], 1, MPI_INT, pn, 2, MPI_COMM_WORLD, &mpi_recv_requests[pn]); } else cn_c[g->mpi_rank] = ce_c[g->mpi_rank] = ec_c[g->mpi_rank] = cn_crem[g->mpi_rank] = ce_crem[g->mpi_rank] = ec_crem[g->mpi_rank] = 0; } MPI_Waitall(g->mpi_world_size * 2, mpi_send_requests, MPI_STATUSES_IGNORE); for (int i = 1; i < 
g->mpi_world_size; i++) { cn_c[i] += cn_c[i - 1]; cn_crem[i] += cn_crem[i - 1]; } neighbor_maprem = malloc(cn_crem[g->mpi_world_size - 1] * sizeof(int) * 2); for (int i = 1; i < g->mpi_world_size; i++) { ce_c[i] += ce_c[i - 1]; ce_crem[i] += ce_crem[i - 1]; } cedge_maprem = malloc(ce_crem[g->mpi_world_size - 1] * sizeof(int) * 2); for (int i = 1; i < g->mpi_world_size; i++) { ec_c[i] += ec_c[i - 1]; ec_crem[i] += ec_crem[i - 1]; } ecell_maprem = malloc(ec_crem[g->mpi_world_size - 1] * sizeof(int) * 2); for (int pn = 0; pn < g->mpi_world_size; pn++) { if (pn != g->mpi_rank) { int *buf = malloc((cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * sizeof(int) * 2); for (int i = 0; i < (cn_c[pn] - (pn ? cn_c[pn - 1] : 0)); i++) { buf[2 * i] = neighbor_map[(pn ? cn_c[pn - 1] * 5 : 0) + 5 * i + 1]; buf[2 * i + 1] = neighbor_map[(pn ? cn_c[pn - 1] * 5 : 0) + 5 * i + 2]; } MPI_Isend(buf, (cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * 2, MPI_INT, pn, 3, MPI_COMM_WORLD, &mpi_send_requests[pn]); MPI_Irecv(&neighbor_maprem[(pn ? cn_crem[pn - 1] * 2 : 0)], (cn_crem[pn] - (pn ? cn_crem[pn - 1] : 0)) * 2, MPI_INT, pn, 3, MPI_COMM_WORLD, &mpi_recv_requests[pn]); MPI_Wait(&mpi_recv_requests[pn], MPI_STATUS_IGNORE); MPI_Wait(&mpi_send_requests[pn], MPI_STATUS_IGNORE); free(buf); buf = malloc((ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * sizeof(int) * 2); for (int i = 0; i < (ce_c[pn] - (pn ? ce_c[pn - 1] : 0)); i++) { buf[2 * i] = cedge_map[(pn ? ce_c[pn - 1] * 5 : 0) + 5 * i + 1]; buf[2 * i + 1] = cedge_map[(pn ? ce_c[pn - 1] * 5 : 0) + 5 * i + 2]; } MPI_Isend(buf, (ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * 2, MPI_INT, pn, 4, MPI_COMM_WORLD, &mpi_send_requests[pn]); MPI_Irecv(&cedge_maprem[(pn ? ce_crem[pn - 1] * 2 : 0)], (ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)) * 2, MPI_INT, pn, 4, MPI_COMM_WORLD, &mpi_recv_requests[pn]); MPI_Wait(&mpi_recv_requests[pn], MPI_STATUS_IGNORE); MPI_Wait(&mpi_send_requests[pn], MPI_STATUS_IGNORE); free(buf); buf = malloc((ec_c[pn] - (pn ? 
ec_c[pn - 1] : 0)) * sizeof(int) * 2); for (int i = 0; i < (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)); i++) { buf[2 * i] = ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 1]; buf[2 * i + 1] = ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 2]; } MPI_Isend(buf, (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * 2, MPI_INT, pn, 5, MPI_COMM_WORLD, &mpi_send_requests[pn]); MPI_Irecv(&ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0)], (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)) * 2, MPI_INT, pn, 5, MPI_COMM_WORLD, &mpi_recv_requests[pn]); MPI_Wait(&mpi_recv_requests[pn], MPI_STATUS_IGNORE); MPI_Wait(&mpi_send_requests[pn], MPI_STATUS_IGNORE); free(buf); } } neighbor_2Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *)); neighbor_3Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *)); cedge_2Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *)); cedge_3Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *)); ecell_2Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *)); ecell_3Dbufrem = malloc(g->mpi_world_size * sizeof(GVAL *)); for (int pn = 0; pn < g->mpi_world_size; pn++) { if (pn != g->mpi_rank) { neighbor_2Dbufrem[pn] = malloc((cn_crem[pn] - (pn ? cn_crem[pn - 1] : 0)) * sizeof(GVAL)); neighbor_3Dbufrem[pn] = malloc((cn_crem[pn] - (pn ? cn_crem[pn - 1] : 0)) * g->height * sizeof(GVAL)); cedge_2Dbufrem[pn] = malloc((ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)) * sizeof(GVAL)); cedge_3Dbufrem[pn] = malloc((ce_crem[pn] - (pn ? ce_crem[pn - 1] : 0)) * g->height * sizeof(GVAL)); ecell_2Dbufrem[pn] = malloc((ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)) * sizeof(GVAL)); ecell_3Dbufrem[pn] = malloc((ec_crem[pn] - (pn ? 
ec_crem[pn - 1] : 0)) * g->height * sizeof(GVAL)); } }

/* Allocate the local-side exchange buffers: for every peer rank pn, one 2D
   (single-layer) and one 3D (g->height layers) buffer per map type
   (cell-neighbor, cell-edge, edge-cell).  Sizes come from the prefix-summed
   counts cn_c / ce_c / ec_c computed earlier in this function. */
neighbor_2Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
neighbor_3Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
cedge_2Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
cedge_3Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
ecell_2Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
ecell_3Dbuf = malloc(g->mpi_world_size * sizeof(GVAL *));
for (int pn = 0; pn < g->mpi_world_size; pn++) {
    if (pn != g->mpi_rank) {
        /* Slice size for rank pn is the difference of adjacent prefix sums
           (cn_c[pn] - cn_c[pn-1]); for pn == 0 the previous sum is 0. */
        neighbor_2Dbuf[pn] = malloc((cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * sizeof(GVAL));
        neighbor_3Dbuf[pn] = malloc((cn_c[pn] - (pn ? cn_c[pn - 1] : 0)) * g->height * sizeof(GVAL));
        cedge_2Dbuf[pn] = malloc((ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * sizeof(GVAL));
        cedge_3Dbuf[pn] = malloc((ce_c[pn] - (pn ? ce_c[pn - 1] : 0)) * g->height * sizeof(GVAL));
        ecell_2Dbuf[pn] = malloc((ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * sizeof(GVAL));
        ecell_3Dbuf[pn] = malloc((ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * g->height * sizeof(GVAL));
    }
}
}
}

/* Computes the [begin, end) local cell-index range covered by block 'blk'.
   *cb is always 0; *ce is the number of cells in the block.  NOTE(review):
   blocks appear to be fixed-size (g->blkSize) except possibly the last one —
   confirm against the block layout code. */
void get_indices_c(GRID * g, int blk, int *cb, int *ce)
{
    *cb = 0;
    /* Last block may be partial: it holds cellCount % blkSize cells unless
       the count divides evenly, in which case it is a full block. */
    *ce = blk == g->cBlkCnt - 1 ? g->cellCount % g->blkSize == 0 ? g->blkSize : g->cellCount % g->blkSize : g->blkSize;
}

/* Same as get_indices_c, but for edge blocks (edgeCount / eBlkCnt). */
void get_indices_e(GRID * g, int blk, int *eb, int *ee)
{
    *eb = 0;
    *ee = blk == g->eBlkCnt - 1 ? g->edgeCount % g->blkSize == 0 ? g->blkSize : g->edgeCount % g->blkSize : g->blkSize;
}
CPhotoconsistencyOdometryBiObjective.h
/* * Photoconsistency-Visual-Odometry * Multiscale Photoconsistency Visual Odometry from RGBD Images * Copyright (c) 2012, Miguel Algaba Borrego * * http://code.google.com/p/photoconsistency-visual-odometry/ * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the holder(s) nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #ifndef _CPHOTOCONSISTENCY_ODOMETRY_BIOBJECTIVE_ #define _CPHOTOCONSISTENCY_ODOMETRY_BIOBJECTIVE_ #define ENABLE_GAUSSIAN_BLUR 1 #define ENABLE_BOX_FILTER_BLUR 0 #define ENABLE_OPENMP_MULTITHREADING 0 #define ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS 0 #include "CPhotoconsistencyOdometry.h" #include "opencv2/highgui/highgui.hpp" #include "opencv2/contrib/contrib.hpp" //TickMeter #include <iostream> namespace PhotoconsistencyOdometry { namespace BiObjective { /*!This class computes the rigid (6DoF) transformation that best aligns a pair of RGBD frames using a photoconsistency maximization approach. To estimate the rigid transformation, this class implements a coarse to fine approach minimizing the photometric and depth error simultaneously. Thus, the algorithm starts finding a first pose approximation at a low resolution level and uses the estimate to initialize the optimization at greater image scales. Both the residuals and jacobians are computed analytically.*/ class CPhotoconsistencyOdometryBiObjective : public CPhotoconsistencyOdometry { private: /*!Intensity (gray), depth and gradient image pyramids. Each pyramid has 'numOptimizationLevels' levels.*/ std::vector<cv::Mat> gray0Pyr,gray1Pyr,depth0Pyr,depth1Pyr,gray1GradXPyr,gray1GradYPyr,depth1GradXPyr,depth1GradYPyr; /*!Camera matrix (intrinsic parameters).*/ Eigen::Matrix3f cameraMatrix; /*!Current optimization level. 
Level 0 corresponds to the higher image resolution.*/ int optimizationLevel; /*!Number of optimization levels.*/ int numOptimizationLevels; /*!Scaling factor to update the state vector (at each level).*/ std::vector<float>lambda_optimization_step; /*!Size (in pixels) of the blur filter (at each level).*/ std::vector<int> blurFilterSize; /*!Scaling factor applied to the image gradients (at each level).*/ std::vector<float> imageGradientsScalingFactor; /*!Maximum number of iterations for the Gauss-Newton algorithm (at each level).*/ std::vector<int> max_num_iterations; /*!Minimum gradient norm of the jacobian (at each level).*/ std::vector<float> min_gradient_norm; /*!Enable the visualization of the optimization process (only for debug).*/ bool visualizeIterations; /*!State vector.*/ Eigen::Matrix<double,6,1> stateVector; //Parameter vector (x y z yaw pitch roll) /*!Gradient of the error function.*/ Eigen::Matrix<double,6,1> gradients; /*!Current iteration at the current optimization level.*/ int iter; /*!Minimum allowed depth to consider a depth pixel valid.*/ float minDepth; /*!Maximum allowed depth to consider a depth pixel valid.*/ float maxDepth; /*!Depth component gain. 
This variable is used to scale the depth values so that depth components are similar to intensity values.*/ float depthComponentGain; void buildPyramid(cv::Mat & img,std::vector<cv::Mat>& pyramid,int levels,bool applyBlur) { //Create space for all the images pyramid.resize(levels); double factor = 1; for(int level=0;level<levels;level++) { //Create an auxiliar image of factor times the size of the original image cv::Mat imgAux; if(level!=0) { cv::resize(img,imgAux,cv::Size(0,0),factor,factor); } else { imgAux = img; } //Blur the resized image with different filter size depending on the current pyramid level if(applyBlur) { #if ENABLE_GAUSSIAN_BLUR if(blurFilterSize[level]>0) { cv::GaussianBlur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level]),3); cv::GaussianBlur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level]),3); } #elif ENABLE_BOX_FILTER_BLUR if(blurFilterSize[level]>0) { cv::blur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level])); cv::blur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level])); } #endif } //Assign the resized image to the current level of the pyramid pyramid[level]=imgAux; factor = factor/2; } } void buildIntensityDerivativesPyramids(std::vector<cv::Mat>& imagePyramid,std::vector<cv::Mat>& derXPyramid,std::vector<cv::Mat>& derYPyramid) { //Compute image gradients int delta = 0; int ddepth = CV_32FC1; //Create space for all the derivatives images derXPyramid.resize(imagePyramid.size()); derYPyramid.resize(imagePyramid.size()); for(int level=0;level<imagePyramid.size();level++) { // Compute the gradient in x cv::Scharr( imagePyramid[level], derXPyramid[level], ddepth, 1, 0, imageGradientsScalingFactor[level], delta, cv::BORDER_DEFAULT ); // Compute the gradient in y cv::Scharr( imagePyramid[level], derYPyramid[level], ddepth, 0, 1, imageGradientsScalingFactor[level], delta, cv::BORDER_DEFAULT ); } } float maxDepthValue(cv::Mat & image) { float maxDepth = 0; for(int 
r=0;r<image.rows;r++) { for(int c=0;c<image.cols;c++) { if(image.at<float>(r,c)>maxDepth) { maxDepth = image.at<float>(r,c); } } } return maxDepth; } void buildDepthDerivativesPyramids(std::vector<cv::Mat>& imagePyramid,std::vector<cv::Mat>& derXPyramid,std::vector<cv::Mat>& derYPyramid) { //Compute image gradients int delta = 0; int ddepth = CV_32FC1; //Create space for all the derivatives images derXPyramid.resize(imagePyramid.size()); derYPyramid.resize(imagePyramid.size()); for(int level=0;level<imagePyramid.size();level++) { cv::Mat imgNormalizedDepth; imagePyramid[level].convertTo(imgNormalizedDepth, CV_32FC1,1./maxDepth); // Compute the gradient in x cv::Scharr( imgNormalizedDepth, derXPyramid[level], ddepth, 1, 0, imageGradientsScalingFactor[level], delta, cv::BORDER_DEFAULT ); // Compute the gradient in y cv::Scharr( imgNormalizedDepth, derYPyramid[level], ddepth, 0, 1, imageGradientsScalingFactor[level], delta, cv::BORDER_DEFAULT ); } } //Separated jacobians void computeResidualsAndJacobians(cv::Mat & source_grayImg, cv::Mat & source_depthImg, cv::Mat & target_grayImg, cv::Mat & target_depthImg, cv::Mat & target_intensityGradXImg, cv::Mat & target_intensityGradYImg, cv::Mat & target_depthGradXImg, cv::Mat & target_depthGradYImg, Eigen::Matrix<double,Eigen::Dynamic,1> & residuals, Eigen::Matrix<double,Eigen::Dynamic,6> & jacobians, cv::Mat & warped_source_grayImage) { int nRows = source_grayImg.rows; int nCols = source_grayImg.cols; double scaleFactor = 1.0/pow(2,optimizationLevel); double fx = cameraMatrix(0,0)*scaleFactor; double fy = cameraMatrix(1,1)*scaleFactor; double ox = cameraMatrix(0,2)*scaleFactor; double oy = cameraMatrix(1,2)*scaleFactor; float inv_fx = 1.f/fx; float inv_fy = 1.f/fy; double x = stateVector[0]; double y = stateVector[1]; double z = stateVector[2]; double yaw = stateVector[3]; double pitch = stateVector[4]; double roll = stateVector[5]; //Compute the rigid transformation matrix from the parameters Eigen::Matrix4f Rt = 
Eigen::Matrix4f::Identity(); double sin_yaw = sin(yaw); double cos_yaw = cos(yaw); double sin_pitch = sin(pitch); double cos_pitch = cos(pitch); double sin_roll = sin(roll); double cos_roll = cos(roll); Rt(0,0) = cos_yaw * cos_pitch; Rt(0,1) = cos_yaw * sin_pitch * sin_roll - sin_yaw * cos_roll; Rt(0,2) = cos_yaw * sin_pitch * cos_roll + sin_yaw * sin_roll; Rt(0,3) = x; Rt(1,0) = sin_yaw * cos_pitch; Rt(1,1) = sin_yaw * sin_pitch * sin_roll + cos_yaw * cos_roll; Rt(1,2) = sin_yaw * sin_pitch * cos_roll - cos_yaw * sin_roll; Rt(1,3) = y; Rt(2,0) = -sin_pitch; Rt(2,1) = cos_pitch * sin_roll; Rt(2,2) = cos_pitch * cos_roll; Rt(2,3) = z; Rt(3,0) = 0; Rt(3,1) = 0; Rt(3,2) = 0; Rt(3,3) = 1; depthComponentGain = cv::mean(target_grayImg).val[0]/cv::mean(target_depthImg).val[0]; #if ENABLE_OPENMP_MULTITHREADING #pragma omp parallel for #endif for (int r=0;r<nRows;r++) { for (int c=0;c<nCols;c++) { int i = nCols*r+c; //vector index //Compute the 3D coordinates of the pij of the source frame Eigen::Vector4f point3D; point3D(2)=source_depthImg.at<float>(r,c); if(minDepth < point3D(2) && point3D(2) < maxDepth)//Compute the jacobian only for the valid points { point3D(0)=(c - ox) * point3D(2) * inv_fx; point3D(1)=(r - oy) * point3D(2) * inv_fy; point3D(3)=1; double px = point3D(0); double py = point3D(1); double pz = point3D(2); //Transform the 3D point using the transformation matrix Rt Eigen::Vector4f transformedPoint3D = Rt*point3D; //Project the 3D point to the 2D plane double inv_transformedPz = 1.0/transformedPoint3D(2); double transformed_r,transformed_c; // 2D coordinates of the transformed pixel(r,c) of frame 1 transformed_c = (transformedPoint3D(0) * fx)*inv_transformedPz + ox; //transformed x (2D) transformed_r = (transformedPoint3D(1) * fy)*inv_transformedPz + oy; //transformed y (2D) int transformed_r_int = round(transformed_r); int transformed_c_int = round(transformed_c); //Asign the intensity value to the warped image and compute the difference between the 
transformed //pixel of frame 1 and the corresponding pixel of frame 2. Compute the error function if((transformed_r_int>=0 && transformed_r_int < nRows) & (transformed_c_int>=0 && transformed_c_int < nCols)) { //Obtain the pixel values that will be used to compute the intensity residual double intensity1; //Intensity value of the pixel(r,c) of the warped frame 1 double intensity2; //Intensity value of the pixel(r,c) of frame 2 intensity1 = source_grayImg.at<float>(r,c); intensity2 = target_grayImg.at<float>(transformed_r_int,transformed_c_int); //Obtain the depth values that will be used to the compute the depth residual double depth1; //Depth value of the pixel(r,c) of the warped frame 1 double depth2; //Depth value of the pixel(r,c) of frame 2 depth1 = source_depthImg.at<float>(r,c); depth2 = target_depthImg.at<float>(transformed_r_int,transformed_c_int); //Compute the rigid transformation jacobian Eigen::Matrix<double,3,6> jacobianRt; //Derivative with respect to x jacobianRt(0,0)=1; jacobianRt(1,0)=0; jacobianRt(2,0)=0; //Derivative with respect to y jacobianRt(0,1)=0; jacobianRt(1,1)=1; jacobianRt(2,1)=0; //Derivative with respect to z jacobianRt(0,2)=0; jacobianRt(1,2)=0; jacobianRt(2,2)=1; //Derivative with respect to yaw jacobianRt(0,3)=py*(-sin(pitch)*sin(roll)*sin(yaw)-cos(roll)*cos(yaw))+pz*(sin(roll)*cos(yaw)-sin(pitch)*cos(roll)*sin(yaw))-cos(pitch)*px*sin(yaw); jacobianRt(1,3)=pz*(sin(roll)*sin(yaw)+sin(pitch)*cos(roll)*cos(yaw))+py*(sin(pitch)*sin(roll)*cos(yaw)-cos(roll)*sin(yaw))+cos(pitch)*px*cos(yaw); jacobianRt(2,3)=0; //Derivative with respect to pitch jacobianRt(0,4)=cos(pitch)*py*sin(roll)*cos(yaw)+cos(pitch)*pz*cos(roll)*cos(yaw)-sin(pitch)*px*cos(yaw); jacobianRt(1,4)=cos(pitch)*py*sin(roll)*sin(yaw)+cos(pitch)*pz*cos(roll)*sin(yaw)-sin(pitch)*px*sin(yaw); jacobianRt(2,4)=-sin(pitch)*py*sin(roll)-sin(pitch)*pz*cos(roll)-cos(pitch)*px; //Derivative with respect to roll 
jacobianRt(0,5)=py*(sin(roll)*sin(yaw)+sin(pitch)*cos(roll)*cos(yaw))+pz*(cos(roll)*sin(yaw)-sin(pitch)*sin(roll)*cos(yaw)); jacobianRt(1,5)=pz*(-sin(pitch)*sin(roll)*sin(yaw)-cos(roll)*cos(yaw))+py*(sin(pitch)*cos(roll)*sin(yaw)-sin(roll)*cos(yaw)); jacobianRt(2,5)=cos(pitch)*py*cos(roll)-cos(pitch)*pz*sin(roll); //Compute the proyective transformation jacobian Eigen::Matrix<double,2,3> jacobianProy; //Derivative with respect to x jacobianProy(0,0)=fx*inv_transformedPz; jacobianProy(1,0)=0; //Derivative with respect to y jacobianProy(0,1)=0; jacobianProy(1,1)=fy*inv_transformedPz; //Derivative with respect to z jacobianProy(0,2)=-(fx*transformedPoint3D(0))*inv_transformedPz*inv_transformedPz; jacobianProy(1,2)=-(fy*transformedPoint3D(1))*inv_transformedPz*inv_transformedPz; //Intensity jacobian: //Apply the chain rule to compound the intensity gradients with the projective+RigidTransform jacobians Eigen::Matrix<double,1,2> target_intensityGradient; target_intensityGradient(0,0)=target_intensityGradXImg.at<float>(i); target_intensityGradient(0,1)=target_intensityGradYImg.at<float>(i); Eigen::Matrix<double,1,6> jacobianItensity=target_intensityGradient*jacobianProy*jacobianRt; //Depth jacobian: //Apply the chain rule to compound the depth gradients with the projective+RigidTransform jacobians Eigen::Matrix<double,1,2> target_depthGradient; target_depthGradient(0,0)=target_depthGradXImg.at<float>(i); target_depthGradient(0,1)=target_depthGradYImg.at<float>(i); Eigen::Matrix<double,1,6> jacobianRt_z; jacobianRt_z(0,0)=jacobianRt(2,0); jacobianRt_z(0,1)=jacobianRt(2,1); jacobianRt_z(0,2)=jacobianRt(2,2); jacobianRt_z(0,3)=jacobianRt(2,3); jacobianRt_z(0,4)=jacobianRt(2,4); jacobianRt_z(0,5)=jacobianRt(2,5); Eigen::Matrix<double,1,6> jacobianDepth=depthComponentGain*(target_depthGradient*jacobianProy*jacobianRt-jacobianRt_z); //Assign the pixel residual and jacobian to its corresponding row #pragma omp critical { //Assign intensity jacobians 
jacobians(i,0)=jacobianItensity(0,0); jacobians(i,1)=jacobianItensity(0,1); jacobians(i,2)=jacobianItensity(0,2); jacobians(i,3)=jacobianItensity(0,3); jacobians(i,4)=jacobianItensity(0,4); jacobians(i,5)=jacobianItensity(0,5); //Assign intensity residuals residuals(nCols*transformed_r_int+transformed_c_int,0) = intensity2 - intensity1; //Assign depth jacobians jacobians(2*i,0)=jacobianDepth(0,0); jacobians(2*i,1)=jacobianDepth(0,1); jacobians(2*i,2)=jacobianDepth(0,2); jacobians(2*i,3)=jacobianDepth(0,3); jacobians(2*i,4)=jacobianDepth(0,4); jacobians(2*i,5)=jacobianDepth(0,5); //Assign depth residuals residuals(nCols*2*transformed_r_int+2*transformed_c_int,0) = depthComponentGain*(depth2 - depth1); if(visualizeIterations) warped_source_grayImage.at<float>(transformed_r_int,transformed_c_int) = intensity1; } } } } } } enum TerminationCriteriaType {NonTerminated = -1,MaxIterationsReached = 0,GradientNormLowerThanThreshold = 1}; bool testTerminationCriteria() { bool optimizationFinished = false; double gradientNorm = gradients.norm(); TerminationCriteriaType terminationCriteria = NonTerminated; if(iter>=max_num_iterations[optimizationLevel]) { terminationCriteria = MaxIterationsReached; optimizationFinished = true; } else if(gradientNorm<min_gradient_norm[optimizationLevel]) { terminationCriteria = GradientNormLowerThanThreshold; optimizationFinished = true; } if(optimizationFinished) { #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS std::cout<<"----------------------------------------"<<std::endl; std::cout<<"Optimization level: "<<optimizationLevel<<std::endl; std::cout<<"Termination criteria: "; #endif switch(terminationCriteria) { case MaxIterationsReached: #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS std::cout<<" Max number of iterations reached ("<<max_num_iterations[optimizationLevel]<<")"<<std::endl;; #endif break; case GradientNormLowerThanThreshold: #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS std::cout<<" Gradient norm is lower than threshold 
("<<gradient_tolerance[optimizationLevel]<<")"<<std::endl; #endif break; default : break; } #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS std::cout<<"Number iterations: "<<iter<<std::endl; std::cout<<"gradient norm: "<<gradientNorm<<std::endl; std::cout<<"----------------------------------------"<<std::endl; #endif } return optimizationFinished; } public: CPhotoconsistencyOdometryBiObjective(){minDepth=0.3;maxDepth=5.0;}; ~CPhotoconsistencyOdometryBiObjective(){}; /*!Sets the minimum depth distance (m) to consider a certain pixel valid.*/ void setMinDepth(float minD) { minDepth = minD; } /*!Sets the maximum depth distance (m) to consider a certain pixel valid.*/ void setMaxDepth(float maxD) { maxDepth = maxD; } /*!Sets the 3x3 matrix of (pinhole) camera intrinsic parameters used to obtain the 3D colored point cloud from the RGB and depth images.*/ void setCameraMatrix(Eigen::Matrix3f & camMat) { cameraMatrix = camMat; } /*!Sets the source (Intensity+Depth) frame.*/ void setSourceFrame(cv::Mat & imgGray,cv::Mat & imgDepth) { //Create a float auxialiary image from the imput image cv::Mat imgGrayFloat; imgGray.convertTo(imgGrayFloat, CV_32FC1, 1./255 ); //Compute image pyramids for the grayscale and depth images buildPyramid(imgGrayFloat,gray0Pyr,numOptimizationLevels,true); buildPyramid(imgDepth,depth0Pyr,numOptimizationLevels,false); //TODO: Do not apply low-pass filtering to depth image } /*!Sets the source (Intensity+Depth) frame.*/ void setTargetFrame(cv::Mat & imgGray,cv::Mat & imgDepth) { //Create a float auxialiary image from the imput image cv::Mat imgGrayFloat; imgGray.convertTo(imgGrayFloat, CV_32FC1, 1./255 ); //Compute image pyramids for the grayscale and depth images buildPyramid(imgGrayFloat,gray1Pyr,numOptimizationLevels,true); buildPyramid(imgDepth,depth1Pyr,numOptimizationLevels,false); //TODO: Do not apply low-pass filtering to depth image //Compute image pyramids for the gradients images 
buildIntensityDerivativesPyramids(gray1Pyr,gray1GradXPyr,gray1GradYPyr); buildDepthDerivativesPyramids(depth1Pyr,depth1GradXPyr,depth1GradYPyr); } /*!Initializes the state vector to a certain value. The optimization process uses the initial state vector as the initial estimate.*/ void setInitialStateVector(const std::vector<double> & initialStateVector) { stateVector[0] = initialStateVector[0]; stateVector[1] = initialStateVector[1]; stateVector[2] = initialStateVector[2]; stateVector[3] = initialStateVector[3]; stateVector[4] = initialStateVector[4]; stateVector[5] = initialStateVector[5]; } /*!Launches the least-squares optimization process to find the configuration of the state vector parameters that maximizes the photoconsistency between the source and target frame.*/ void optimize() { for(optimizationLevel = numOptimizationLevels-1;optimizationLevel>=0;optimizationLevel--) { int nRows = gray0Pyr[optimizationLevel].rows; int nCols = gray0Pyr[optimizationLevel].cols; int nPoints = nRows * nCols; iter = 0; while(true) { #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS cv::TickMeter tm;tm.start(); #endif cv::Mat warped_source_grayImage; if(visualizeIterations) warped_source_grayImage = cv::Mat::zeros(nRows,nCols,gray0Pyr[optimizationLevel].type()); Eigen::Matrix<double,Eigen::Dynamic,1> residuals; residuals = Eigen::MatrixXd::Zero(2*nPoints,1); Eigen::Matrix<double,Eigen::Dynamic,6> jacobians; jacobians = Eigen::MatrixXd::Zero(2*nPoints,6); if(max_num_iterations[optimizationLevel]>0) //compute only if the number of maximum iterations are greater than 0 { computeResidualsAndJacobians( gray0Pyr[optimizationLevel], depth0Pyr[optimizationLevel], gray1Pyr[optimizationLevel], depth1Pyr[optimizationLevel], gray1GradXPyr[optimizationLevel], gray1GradYPyr[optimizationLevel], depth1GradXPyr[optimizationLevel], depth1GradYPyr[optimizationLevel], residuals, jacobians, warped_source_grayImage); gradients = jacobians.transpose()*residuals; stateVector = stateVector - 
lambda_optimization_step[optimizationLevel]*((jacobians.transpose()*jacobians).inverse() * gradients); #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS tm.stop(); std::cout << "Iteration time = " << tm.getTimeSec() << " sec." << std::endl; #endif } iter++; if(testTerminationCriteria()){break;} if(visualizeIterations) { cv::Mat imgDiff = cv::Mat::zeros(nRows,nCols,gray1Pyr[optimizationLevel].type()); cv::absdiff(gray1Pyr[optimizationLevel],warped_source_grayImage,imgDiff); cv::imshow("optimize::imgDiff",imgDiff); cv::waitKey(0); } } } //After all the optimization process the optimization level is 0 optimizationLevel = 0; } /*!Returns the optimal state vector. This method has to be called after calling the optimize() method.*/ void getOptimalStateVector(std::vector<double> & optimalStateVector) { optimalStateVector[0] = stateVector[0]; optimalStateVector[1] = stateVector[1]; optimalStateVector[2] = stateVector[2]; optimalStateVector[3] = stateVector[3]; optimalStateVector[4] = stateVector[4]; optimalStateVector[5] = stateVector[5]; } /*!Returns the optimal 4x4 rigid transformation matrix between the source and target frame. 
This method has to be called after calling the optimize() method.*/ void getOptimalRigidTransformationMatrix(Eigen::Matrix4f & optimal_Rt) { eigenPose(stateVector[0],stateVector[1],stateVector[2], stateVector[3],stateVector[4],stateVector[5],optimal_Rt); } /*!Reads the configuration parameters from a .yml file.*/ void readConfigurationFile(std::string fileName) { cv::FileStorage fs(fileName, cv::FileStorage::READ); //Read the number of optimization levels fs["numOptimizationLevels"] >> numOptimizationLevels; #if ENABLE_GAUSSIAN_BLUR || ENABLE_BOX_FILTER_BLUR //Read the blur filter size at every pyramid level fs["blurFilterSize (at each level)"] >> blurFilterSize; #endif //Read the scaling factor for each gradient image at each level fs["imageGradientsScalingFactor (at each level)"] >> imageGradientsScalingFactor; //Read the lambda factor to change the optimization step fs["lambda_optimization_step (at each level)"] >> lambda_optimization_step; //Read the number of Levenberg-Marquardt iterations at each optimization level fs["max_num_iterations (at each level)"] >> max_num_iterations; //Read optimizer minimum gradient norm at each level fs["min_gradient_norm (at each level)"] >> min_gradient_norm; //Read the boolean value to determine if visualize the progress images or not fs["visualizeIterations"] >> visualizeIterations; } }; } //end namespace BiObjective } //end namespace PhotoconsistencyOdometry #endif
a.35.4.c
/* { dg-do compile } */
/* OpenMP nesting-rules testcase: a barrier region may not be closely nested
   inside a worksharing (for) region.  The misuse below is intentional; the
   dg-error directive checks that the compiler diagnoses it.  Do not "fix"
   the barrier placement. */

void work (int, int);

void
wrong4 (int n)
{
#pragma omp parallel default(shared)
  {
    int i;
#pragma omp for
    for (i = 0; i < n; i++)
      {
        work (i, 0);
        /* incorrect nesting of barrier region in a loop region */
#pragma omp barrier /* { dg-error "may not be closely nested" } */
        work (i, 1);
      }
  }
}
main.c
#include <stdio.h>
#include <omp.h>

/* Demo comparing OpenMP static and dynamic loop scheduling: computes
   c[i] = a[i] + b[i] in parallel and prints which thread handled each
   iteration.  Returns 0 on success. */

#define CHUNKSIZE 2
#define N 10

int main()
{
    int i, chunk;
    int threadID;
    int a[N], b[N], c[N];

    /* Initial values: a[i] = b[i] = i.  (BUGFIX: the original used 'i * 1.0',
       an implicit double-to-int conversion with no effect but misleading.) */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i;

    chunk = CHUNKSIZE;

    printf("Static scheduling\n");
    #pragma omp parallel shared(a, b, c, chunk) private(i, threadID)
    {
        /* BUGFIX: numberOfThreads was a shared variable written by every
           thread (a data race); it is the same value for all threads, so
           compute it once per thread as a private local instead. */
        int numberOfThreads = omp_get_num_threads();
        #pragma omp for schedule(static, chunk)
        for (i = 0; i < N; i++) {
            threadID = omp_get_thread_num();
            c[i] = a[i] + b[i];
            printf("Thread %d of %d is calculating the iteration %d\n", threadID, numberOfThreads, i);
        }
    }

    printf("Dynamic scheduling\n");
    #pragma omp parallel shared(a, b, c, chunk) private(i, threadID)
    {
        int numberOfThreads = omp_get_num_threads();
        #pragma omp for schedule(dynamic, chunk)
        for (i = 0; i < N; i++) {
            threadID = omp_get_thread_num();
            c[i] = a[i] + b[i];
            printf("Thread %d of %d is calculating the iteration %d\n", threadID, numberOfThreads, i);
        }
    }

    return 0;
}
GB_binop__eq_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_08__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_04__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int64) // A*D function (colscale): GB (_AxD__eq_int64) // D*A function (rowscale): GB (_DxB__eq_int64) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int64) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int64) // C=scalar+B GB (_bind1st__eq_int64) // C=scalar+B' GB (_bind1st_tran__eq_int64) // C=A+scalar GB (_bind2nd__eq_int64) // C=A'+scalar GB (_bind2nd_tran__eq_int64) // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 
// aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): auto-generated SuiteSparse:GraphBLAS kernels for the EQ_INT64
// binary operator (z = (x == y), z is bool).  Do not edit by hand; regenerate
// instead.  The "#if" matching the first "#endif" below, and the macro
// definitions these kernels rely on (GB_DISABLE, GBB, GBX, ...), are above
// this chunk.  "(none)" means the dense ewise3-with-accum variant is not
// generated for this operator.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // generator emitted "#if 0": this accum kernel is intentionally disabled
    // for this operator; the call is a no-op that reports success.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // intentionally disabled for this operator (see note above)
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C is bool since EQ_INT64 returns bool
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (if any)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (same type here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
MergeShuffle.h
#pragma once

#include "DefaultRandomGenerator.h"
#include "shuffle/Shuffle.h"

#include <algorithm>
#include <thread>
#include <vector>

// Parallel shuffle: Fisher-Yates on 2^c blocks in parallel, then a log-depth
// sequence of pairwise random merges (the "MergeShuffle" scheme of Bacher,
// Bodini, Hollender & Lumbroso).  NOTE(review): uses assert() and OpenMP
// pragmas but includes neither <cassert> nor <omp.h>; presumably they arrive
// via the project headers above -- confirm.
template <class ContainerType = std::vector<uint64_t>, class RandomGenerator = DefaultRandomGenerator>
class MergeShuffle : public Shuffle<ContainerType, RandomGenerator>
{
private:
    // Blocks no larger than this are shuffled directly with std::shuffle.
    static constexpr unsigned long cutoff = 0x10000;

    // One generator per block, so parallel workers never share RNG state.
    // Lazily grown in mergeShuffle(); persists across calls.
    std::vector<DefaultRandomGenerator> generators;

    // Serves one random bit per call, refilling a 64-bit buffer from g
    // whenever the buffer is exhausted (index wraps to 0).
    struct Flipper
    {
        Flipper( RandomGenerator& generator ) : g( generator )
        {
        }
        RandomGenerator& g;
        uint64_t current = 0;  // current 64-bit buffer of random bits
        uint64_t index = 0;    // next bit position to serve in `current`
        bool operator()()
        {
            if( index == 0 )
                current = g();
            bool res = ( current >> index ) & 1;
            index = ( index + 1 ) % 64;
            return res;
        }
    };

    // Uniform integer in [0, n) built one random bit at a time (rejection
    // sampling, cf. Lumbroso's "fast dice roller").
    // NOTE(review): `flip` is taken BY VALUE, so the caller's Flipper buffer
    // state does not advance and some buffered bits are re-served to the
    // caller afterwards -- looks like a potential bit-reuse issue; verify
    // against the reference implementation.  Also loops forever if n == 0.
    static inline unsigned long randomInt( Flipper flip, unsigned long n )
    {
        unsigned long v = 1;
        unsigned long d = 0;
        while( true )
        {
            d += d + flip();  // append one random bit to d
            v += v;           // track how many outcomes d can encode
            if( v >= n )
            {
                if( d < n )
                    return d;
                // reject and carry the remainder into the next round
                v -= n;
                d -= n;
            }
        }
    }

    // Randomly interleave the two already-shuffled runs
    // [start, start+mid_idx) and [start+mid_idx, start+end_idx), in place.
    template <class T>
    void merge( T* start, uint64_t mid_idx, uint64_t end_idx, RandomGenerator& g )
    {
        T* const original_start = start;
        T* mid = start + mid_idx;
        T* end = start + end_idx;
        Flipper flip( g );
        // Phase 1: coin-flip which run supplies the next element, until one
        // run is exhausted.
        while( true )
        {
            if( flip() )
            {
                if( start == mid )
                    break;
            }
            else
            {
                if( mid == end )
                    break;
                std::swap( *start, *mid );
                mid++;
            }
            start++;
        }
        // Phase 2: insert each remaining element at a uniformly random
        // position among those already processed (Fisher-Yates style).
        while( start != end )
        {
            const uint64_t num_processed = start - original_start;
            const uint64_t index = randomInt( flip, num_processed );
            std::swap( *( original_start + index ), *start );
            start++;
        }
    }

    // Shuffle t[0..n): split into q = 2^c blocks of at most ~cutoff elements,
    // shuffle each block in parallel, then merge pairs of blocks over log2(q)
    // rounds.
    template <class T>
    void mergeShuffle( T* t, uint64_t n, RandomGenerator& g )
    {
        // Calculate the number of divisions to reach the cutoff
        unsigned int c = 0;
        while( ( n >> c ) > cutoff )
            c++;
        unsigned int q = 1 << c;
        unsigned long nn = n;

        // Seed any additional per-block generators from g.
        if( generators.capacity() < q )
            generators.reserve( q );
        while( generators.size() < q )
            generators.emplace_back( g() );

        // Launch thread for local fisher yates
#pragma omp parallel for
        for( unsigned int i = 0; i < q; i++ )
        {
            // Block i covers [nn*i >> c, nn*(i+1) >> c).
            unsigned long j = nn * i >> c;
            unsigned long k = std::min( nn * ( i + 1 ) >> c, nn );
            assert( j < nn );
            assert( k <= nn );
            std::shuffle( t + j, t + k, this->generators[i] );
        }

        // Merge blocks pairwise: p doubles each round (1, 2, 4, ...).
        for( unsigned int p = 1; p < q; p += p )
        {
#pragma omp parallel for
            for( unsigned int i = 0; i < q; i += 2 * p )
            {
                unsigned long j = nn * i >> c;
                unsigned long k = nn * ( i + p ) >> c;
                unsigned long l = std::min( nn * ( i + 2 * p ) >> c, nn );
                assert( j < nn );
                assert( k < nn );
                assert( l <= nn );
                merge( t + j, k - j, l - j, this->generators[i] );
            }
        }
    }

public:
    // Shuffle the first `num` elements into out_container (copying first when
    // in/out differ; out_container must already be large enough).
    void shuffle( const ContainerType& in_container, ContainerType& out_container, uint64_t seed,
                  uint64_t num ) override
    {
        if( &in_container != &out_container )
        {
            // Copy if we are not doing an inplace operation
            std::copy( in_container.begin(), in_container.begin() + num, out_container.begin() );
        }

        RandomGenerator g( seed );
        mergeShuffle( out_container.data(), num, g );
    }

    // Output order depends on OpenMP scheduling of the merge rounds as well
    // as the seed, hence not reported as deterministic.
    bool isDeterministic() const override
    {
        return false;
    }
};
GB_unop__ainv_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__ainv_uint32_uint32
// op(A') function:  GB_unop_tran__ainv_uint32_uint32

// C type:   uint32_t
// A type:   uint32_t
// cast:     uint32_t cij = aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse; on uint32_t this is well-defined
// modular negation, i.e. (2^32 - x) mod 2^32)
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = aij ;              \
    Cx [pC] = -z ;                  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__ainv_uint32_uint32
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    const uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        uint32_t z = aij ;
        Cx [p] = -z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__ainv_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // second pass only: Rowcounts already holds the transpose pattern
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
column_matrix.h
/*!
 * Copyright 2017 by Contributors
 * \file column_matrix.h
 * \brief Utility for fast column-wise access
 * \author Philip Cho
 */
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_

#include <limits>
#include <vector>
#include <memory>
#include "hist_util.h"

namespace xgboost {
namespace common {

class ColumnMatrix;

/*! \brief column type */
enum ColumnType {
  kDenseColumn,
  kSparseColumn
};

/*! \brief a column storage, to be used with ApplySplit. Note that each
    bin id is stored as index[i] + index_base.
    Different types of column index for each column allow
    to reduce the memory usage. */
template <typename BinIdxType>
class Column {
 public:
  Column(ColumnType type, common::Span<const BinIdxType> index, const uint32_t index_base)
      : type_(type),
        index_(index),
        index_base_(index_base) {}
  // global bin id = per-column compressed id + per-feature base offset
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + static_cast<uint32_t>(index_[idx]);
  }
  BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }
  const uint32_t GetBaseIdx() const { return index_base_; }
  common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }
  ColumnType GetType() const { return type_; }
  /* returns number of elements in column */
  size_t Size() const { return index_.size(); }

 private:
  /* type of column */
  ColumnType type_;
  /* bin indexes in range [0, max_bins - 1] */
  common::Span<const BinIdxType> index_;
  /* bin index offset for specific feature */
  const uint32_t index_base_;
};

/*! \brief column with an explicit row-index list (one entry per stored value) */
template <typename BinIdxType>
class SparseColumn: public Column<BinIdxType> {
 public:
  SparseColumn(ColumnType type, common::Span<const BinIdxType> index,
               uint32_t index_base, common::Span<const size_t> row_ind)
      : Column<BinIdxType>(type, index, index_base),
        row_ind_(row_ind) {}

  const size_t* GetRowData() const { return row_ind_.data(); }
  size_t GetRowIdx(size_t idx) const {
    return row_ind_.data()[idx];
  }

 private:
  /* indexes of rows */
  common::Span<const size_t> row_ind_;
};

/*! \brief dense column: one slot per row; missing rows are marked in
    missing_flags_ (shared with the owning ColumnMatrix) */
template <typename BinIdxType>
class DenseColumn: public Column<BinIdxType> {
 public:
  DenseColumn(ColumnType type, common::Span<const BinIdxType> index,
              uint32_t index_base, const std::vector<bool>& missing_flags,
              size_t feature_offset)
      : Column<BinIdxType>(type, index, index_base),
        missing_flags_(missing_flags),
        feature_offset_(feature_offset) {}
  bool IsMissing(size_t idx) const { return missing_flags_[feature_offset_ + idx]; }
 private:
  /* flags for missing values in dense columns */
  const std::vector<bool>& missing_flags_;
  size_t feature_offset_;  // start of this feature's slots in missing_flags_
};

/*! \brief a collection of columns, with support for construction from
    GHistIndexMatrix. */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

  // construct column matrix from GHistIndexMatrix; a feature becomes sparse
  // when its nonzero count is below sparse_threshold * nrow
  inline void Init(const GHistIndexMatrix& gmat,
                   double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;
    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    // each feature's bin range must fit in uint32_t
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
    }
    bool all_dense = gmat.IsDense();
    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
          < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
        all_dense = false;
      } else {
        type_[fid] = kDenseColumn;
      }
    }

    // want to compute storage boundary for each feature
    // using variants of prefix sum scan
    feature_offsets_.resize(nfeature + 1);
    size_t accum_index_ = 0;
    feature_offsets_[0] = accum_index_;
    for (int32_t fid = 1; fid < nfeature + 1; ++fid) {
      if (type_[fid - 1] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid - 1];
      }
      feature_offsets_[fid] = accum_index_;
    }

    SetTypeSize(gmat.max_num_bins);

    index_.resize(feature_offsets_[nfeature] * bins_type_size_, 0);
    if (!all_dense) {
      row_ind_.resize(feature_offsets_[nfeature]);
    }

    // store least bin id for each feature
    // NOTE(review): keeps a raw pointer into gmat's cut vector -- gmat must
    // outlive this ColumnMatrix; confirm callers guarantee that.
    index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());

    const bool noMissingValues = NoMissingValues(gmat.row_ptr[nrow], nrow, nfeature);

    if (noMissingValues) {
      missing_flags_.resize(feature_offsets_[nfeature], false);
    } else {
      missing_flags_.resize(feature_offsets_[nfeature], true);
    }

    // pre-fill index_ for dense columns
    if (all_dense) {
      BinTypeSize gmat_bin_size = gmat.index.GetBinTypeSize();
      if (gmat_bin_size == kUint8BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint8_t>(), gmat, nrow, nfeature, noMissingValues);
      } else if (gmat_bin_size == kUint16BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint16_t>(), gmat, nrow, nfeature, noMissingValues);
      } else {
        CHECK_EQ(gmat_bin_size, kUint32BinsTypeSize);
        SetIndexAllDense(gmat.index.data<uint32_t>(), gmat, nrow, nfeature, noMissingValues);
      }
    /* For sparse DMatrix gmat.index.getBinTypeSize() returns always kUint32BinsTypeSize
       but for ColumnMatrix we still have a chance to reduce the memory consumption */
    } else {
      if (bins_type_size_ == kUint8BinsTypeSize) {
          SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else if (bins_type_size_ == kUint16BinsTypeSize) {
          SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else {
          CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
          SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      }
    }
  }

  /* Set the number of bytes based on numeric limit of maximum number of bins provided by user */
  void SetTypeSize(size_t max_num_bins) {
    if ( (max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint8_t>::max()) ) {
      bins_type_size_ = kUint8BinsTypeSize;
    } else if ((max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint16_t>::max())) {
      bins_type_size_ = kUint16BinsTypeSize;
    } else {
      bins_type_size_ = kUint32BinsTypeSize;
    }
  }

  /* Fetch an individual column. This code should be used with type swith
     to determine type of bin id's */
  template <typename BinIdxType>
  std::unique_ptr<const Column<BinIdxType> > GetColumn(unsigned fid) const {
    CHECK_EQ(sizeof(BinIdxType), bins_type_size_);

    const size_t feature_offset = feature_offsets_[fid];  // to get right place for certain feature
    const size_t column_size = feature_offsets_[fid + 1] - feature_offset;
    common::Span<const BinIdxType> bin_index = { reinterpret_cast<const BinIdxType*>(
                                                 &index_[feature_offset * bins_type_size_]),
                                                 column_size };
    std::unique_ptr<const Column<BinIdxType> > res;
    if (type_[fid] == ColumnType::kDenseColumn) {
      res.reset(new DenseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                            missing_flags_, feature_offset));
    } else {
      res.reset(new SparseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                             {&row_ind_[feature_offset], column_size}));
    }
    return res;
  }

  // fill index_ when every column is dense; `index` holds one bin id per
  // (row, feature) cell in row-major order
  template<typename T>
  inline void SetIndexAllDense(T* index, const GHistIndexMatrix& gmat,  const size_t nrow,
                               const size_t nfeature,  const bool noMissingValues) {
    T* local_index = reinterpret_cast<T*>(&index_[0]);

    /* missing values make sense only for column with type kDenseColumn,
       and if no missing values were observed it could be handled much faster. */
    if (noMissingValues) {
      #pragma omp parallel for num_threads(omp_get_max_threads())
      for (omp_ulong rid = 0; rid < nrow; ++rid) {
        const size_t ibegin = rid*nfeature;
        const size_t iend = (rid+1)*nfeature;
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const size_t idx = feature_offsets_[j];
          // transpose: row-major gmat entry -> column-major slot
          local_index[idx + rid] = index[i];
        }
      }
    } else {
      /* to handle rows in all batches, sum of all batch sizes equal to gmat.row_ptr.size() - 1 */
      size_t rbegin = 0;
      for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
        const xgboost::Entry* data_ptr = batch.data.HostVector().data();
        const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
        const size_t batch_size = batch.Size();
        CHECK_LT(batch_size, offset_vec.size());
        for (size_t rid = 0; rid < batch_size; ++rid) {
          const size_t size = offset_vec[rid + 1] - offset_vec[rid];
          SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};

          const size_t ibegin = gmat.row_ptr[rbegin + rid];
          const size_t iend = gmat.row_ptr[rbegin + rid + 1];
          CHECK_EQ(ibegin + inst.size(), iend);
          size_t j = 0;
          size_t fid = 0;
          for (size_t i = ibegin; i < iend; ++i, ++j) {
            fid = inst[j].index;
            const size_t idx = feature_offsets_[fid];
            /* rbegin allows to store indexes from specific SparsePage batch */
            local_index[idx + rbegin + rid] = index[i];
            missing_flags_[idx + rbegin + rid] = false;
          }
        }
        rbegin += batch.Size();
      }
    }
  }

  // fill index_ / row_ind_ when at least one column is sparse; bin ids are
  // rebased per feature (bin_id - index_base_[fid]) so they fit in T
  template<typename T>
  inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
                       const size_t nrow, const size_t nfeature) {
    std::vector<size_t> num_nonzeros;  // per-feature fill cursor for sparse columns
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);

    T* local_index = reinterpret_cast<T*>(&index_[0]);
    size_t rbegin = 0;
    for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
      const xgboost::Entry* data_ptr = batch.data.HostVector().data();
      const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
      const size_t batch_size = batch.Size();
      CHECK_LT(batch_size, offset_vec.size());
      for (size_t rid = 0; rid < batch_size; ++rid) {
        const size_t ibegin = gmat.row_ptr[rbegin + rid];
        const size_t iend = gmat.row_ptr[rbegin + rid + 1];
        size_t fid = 0;
        const size_t size = offset_vec[rid + 1] - offset_vec[rid];
        SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
        CHECK_EQ(ibegin + inst.size(), iend);
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const uint32_t bin_id = index[i];
          fid = inst[j].index;
          if (type_[fid] == kDenseColumn) {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[rid + rbegin] = bin_id - index_base_[fid];
            missing_flags_[feature_offsets_[fid] + rid + rbegin] = false;
          } else {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
            row_ind_[feature_offsets_[fid] + num_nonzeros[fid]] = rid + rbegin;
            ++num_nonzeros[fid];
          }
        }
      }
      rbegin += batch.Size();
    }
  }
  const BinTypeSize GetTypeSize() const {
    return bins_type_size_;
  }

  // true iff every (row, feature) cell is populated, i.e. the matrix is
  // fully dense with no missing values
  const bool NoMissingValues(const size_t n_elements,
                             const size_t n_row, const size_t n_features) {
    return n_elements == n_features * n_row;
  }

 private:
  // raw byte storage for all columns' bin ids (element width = bins_type_size_)
  std::vector<uint8_t> index_;

  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  std::vector<size_t> row_ind_;
  /* indicate where each column's index and row_ind is stored. */
  std::vector<size_t> feature_offsets_;

  // index_base_[fid]: least bin id for feature fid
  uint32_t* index_base_;
  std::vector<bool> missing_flags_;
  BinTypeSize bins_type_size_;
};

}  // namespace common
}  // namespace xgboost
#endif  // XGBOOST_COMMON_COLUMN_MATRIX_H_
bisection.c
#include "header.h"
#include <stdlib.h>   /* malloc, realloc, free */
#include <math.h>     /* ceil */

/**
 * Finds a zero, using bisection, in the interval [a, b] of the function
 * pointed to by fun_ptr.  If no zero is found, the function returns -1.0.
 *
 * Note: -1.0 is an in-band sentinel, so a genuine root located exactly at
 * -1.0 is indistinguishable from failure.  Kept for interface compatibility.
 */
double find_zero_bisection(double (*fun_ptr)(double), double a, double b)
{
    const int max_iter = 100;   /* maximum number of iterations */
    const double tol = 1e-8;    /* tolerance on interval length */

    double fa = fun_ptr(a);     /* cache f(a): avoids two re-evaluations per iteration */
    double c;
    int i = 0;

    /* perform bisection while the interval length is longer than the
       tolerance and the maximum number of iterations has not been reached */
    do {
        c = a + (b - a) * 0.5;  /* mid-point; avoids overflow of (a+b)/2 */
        double fc = fun_ptr(c);
        if (fa * fc > 0) {      /* same sign: the sign change is in [c, b] */
            a = c;
            fa = fc;
        } else {                /* sign change (or a zero) is in [a, c] */
            b = c;
        }
        i++;
    } while (b - a > tol && i < max_iter);

    /* if sgn(f(a)) == sgn(f(b)), no zero was found */
    if (fa * fun_ptr(b) > 0) {
        return -1.0;
    }
    return c;
}

/**
 * Finds all zeros in the interval [a, b] by dividing it into N subintervals
 * and running bisection on each, using T OpenMP threads.
 *
 * Returns a heap-allocated array (caller frees zeros->ptr and zeros), or
 * NULL on allocation failure.
 */
array *find_all_zeros_bisection(double (*fun_ptr)(double), double a, double b,
                                int N, int T)
{
    array *zeros = malloc(sizeof *zeros);
    if (zeros == NULL) {
        return NULL;
    }
    zeros->ptr = malloc(N * sizeof(double));
    if (zeros->ptr == NULL) {
        free(zeros);
        return NULL;
    }
    zeros->size = 0;

    double dn = (b - a) / ((double) N);
    double xl, xu;
    int chunk = ceil(N / (double) T);   /* contiguous chunk per thread */

    #pragma omp parallel for num_threads(T) schedule(static, chunk) private(xl, xu)
    for (int i = 0; i < N; i++) {
        xl = a + dn * i;
        xu = a + dn * (i + 1);
        zeros->ptr[i] = find_zero_bisection(fun_ptr, xl, xu);
    }

    /* compact in place: keep only subintervals where a zero was found
       (size <= i always holds, so no slot is overwritten before it is read) */
    for (int i = 0; i < N; i++) {
        if (zeros->ptr[i] != -1) {
            zeros->ptr[zeros->size] = zeros->ptr[i];
            zeros->size++;
        }
    }

    /* shrink to fit; never overwrite the pointer with the realloc result
       directly (would leak/lose data on failure), and skip realloc(p, 0)
       (implementation-defined) when no zeros were found */
    if (zeros->size > 0) {
        double *shrunk = realloc(zeros->ptr, zeros->size * sizeof(double));
        if (shrunk != NULL) {
            zeros->ptr = shrunk;
        }
        /* on failure the original, larger buffer is still valid; keep it */
    }
    return zeros;
}
real_to_complex_3d.h
#ifndef SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_H #define SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_H #include <scitbx/fftpack/complex_to_complex.h> #include <scitbx/fftpack/real_to_complex.h> #include <omptbx/omp_or_stubs.h> #define SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP namespace scitbx { namespace fftpack { /*! \brief Physical dimensions of 3-dimensional real-to-complex array as complex array, given generic dimensions of real array. */ /*! The real-to-complex array contains product(n_complex) complex values, i.e. product(2*n_complex) real values. <p> See also: m_real_from_n_real() */ template <typename IntegerType, std::size_t D> inline af::tiny<IntegerType, D> n_complex_from_n_real(const af::tiny<IntegerType, D>& n_real) { af::tiny<IntegerType, D> result = n_real; result[D-1] = n_complex_from_n_real(result[D-1]); return result; } /*! \brief Physical dimensions of 3-dimensional real-to-complex array as real array, given generic dimensions of complex array. */ /*! The real-to-complex array contains product(n_complex) complex values, i.e. product(2*n_complex) real values. <p> See also: n_complex_from_n_real() */ template <typename IntegerType, std::size_t D> inline af::tiny<IntegerType, D> n_real_from_n_complex(const af::tiny<IntegerType, D>& n_complex) { af::tiny<IntegerType, D> result = n_complex; result[D-1] *= 2; return result; } /*! \brief Physical dimensions of 3-dimensional real-to-complex array as real array, given generic dimensions of real array. */ /*! The real-to-complex array contains product(n_complex) complex values, i.e. product(2*n_complex) real values. <p> See also: n_complex_from_n_real() */ template <typename IntegerType, std::size_t D> inline af::tiny<IntegerType, D> m_real_from_n_real(const af::tiny<IntegerType, D>& n_real) { af::tiny<IntegerType, D> result = n_real; result[D-1] = m_real_from_n_real(result[D-1]); return result; } //! 3-dimensional real-to-complex Fast Fourier Transformation. /*! 
The real-to-complex Fourier transform of a real array is Hermitian. I.e., map(i,j,k) is the conjugate complex of map(-i,-j,-k). Exploiting this symmetry leads to reduced memory usage and faster Fourier transformations. <p> In this implementation, the Hermitian symmetry is exploited by omitting the negative half-space in the third dimension. I.e., the real-to-complex transformed array contains only n_real/2+1 (n_real_to_n_complex()) complex values in the third dimension. <p> Note that sligthly more than half the data are present in the real-to-complex transformed array: both map(i,j,0) and map(-i,-j,0) are present. It would be impractical to remove this remaining symmetry. <b>For the backward transform, it is important to provide both map(i,j,0) and map(-i,-j,0)</b>. */ template <typename RealType, typename ComplexType = std::complex<RealType> > class real_to_complex_3d { public: #ifndef DOXYGEN_SHOULD_SKIP_THIS typedef RealType real_type; typedef ComplexType complex_type; #endif // DOXYGEN_SHOULD_SKIP_THIS //! Default constructor. real_to_complex_3d() {} //! Initialization for transforms of lengths n_real. /*! See also: Constructors of complex_to_complex and real_to_complex. */ real_to_complex_3d(const af::int3& n_real) : n_real_(n_real) { init(); } //! Initialization for transforms of lengths n0, n1, n2. /*! See also: Constructors of complex_to_complex and real_to_complex. */ real_to_complex_3d(std::size_t n0, std::size_t n1, std::size_t n2) : n_real_(n0, n1, n2) { init(); } //! Generic dimensions of real array. af::int3 n_real() const { return n_real_; } //! Physical dimensions of real-to-complex array as complex array. /*! See also: m_real(), n_complex_from_n_real() */ af::int3 n_complex() const { return n_complex_from_n_real(n_real_); } //! Physical dimensions of real-to-complex array as real array. /*! See also: n_complex(), m_real_from_n_real() */ af::int3 m_real() const { return m_real_from_n_real(n_real_); } //! In-place "forward" Fourier transformation. 
/*! See also: complex_to_complex, real_to_complex */ template <typename RealOrComplexMapType> void forward(RealOrComplexMapType map) { typedef typename RealOrComplexMapType::value_type real_or_complex_type; forward(map, real_or_complex_type()); } //! In-place "backward" Fourier transformation. /*! <b>It is important to provide both map(i,j,0) and map(-i,-j,0)</b>. See class details. <p> See also: complex_to_complex, real_to_complex */ template <typename RealOrComplexMapType> void backward(RealOrComplexMapType map) { typedef typename RealOrComplexMapType::value_type real_or_complex_type; backward(map, real_or_complex_type()); } private: void init(); // Cast map of complex to map of real. template <typename MapType> void forward(MapType map, complex_type) { typedef typename MapType::accessor_type accessor_type; af::ref<real_type, accessor_type> rmap( reinterpret_cast<real_type*>(map.begin()), n_real_from_n_complex(map.accessor())); forward(rmap, real_type()); } // Core routine always works on real maps. 
template <typename MapType> void forward(MapType map, real_type) // FUTURE: move out of class body { // TODO: avoid i, i+1 by casting to complex int nx = n_real_[0]; int ny = n_real_[1]; int nzc = fft1d_z_.n_complex(); int seq_size = 2 * std::max(std::max(nx, ny), nzc); scitbx::auto_array<real_type> seq_and_scratch; if (omp_in_parallel() == 0) omp_set_dynamic(0); #if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP) #pragma omp parallel #endif { int num_threads = omp_get_num_threads(); int i_thread = omp_get_thread_num(); #if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP) #pragma omp single #endif { seq_and_scratch = scitbx::auto_array<real_type>( new real_type[2 * seq_size * num_threads]); } real_type* seq = seq_and_scratch.get() + 2 * seq_size * i_thread; real_type* scratch = seq + seq_size; #if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP) #pragma omp for #endif for (int ix = 0; ix < nx; ix++) { for (int iy = 0; iy < ny; iy++) { // Transform along z (fast direction) fft1d_z_.forward(&map(ix, iy, 0), scratch); } for (int iz = 0; iz < nzc; iz++) { for (int iy = 0; iy < ny; iy++) { seq[2*iy] = map(ix, iy, 2*iz); seq[2*iy+1] = map(ix, iy, 2*iz+1); } // Transform along y (medium direction) fft1d_y_.transform(select_sign<forward_tag>(), seq, scratch); for (int iy = 0; iy < ny; iy++) { map(ix, iy, 2*iz) = seq[2*iy]; map(ix, iy, 2*iz+1) = seq[2*iy+1]; } } } #if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP) #pragma omp for #endif for (int iy = 0; iy < ny; iy++) { for (int iz = 0; iz < nzc; iz++) { for (int ix = 0; ix < nx; ix++) { seq[2*ix] = map(ix, iy, 2*iz); seq[2*ix+1] = map(ix, iy, 2*iz+1); } // Transform along x (slow direction) fft1d_x_.transform(select_sign<forward_tag>(), seq, scratch); for (int ix = 0; ix < nx; ix++) { map(ix, iy, 2*iz) = seq[2*ix]; map(ix, iy, 2*iz+1) = seq[2*ix+1]; } } } } } // Cast map of complex to map of real. 
// Cast map of complex to map of real, then run the real core routine.
// NOTE(review): same reinterpret_cast layout assumption as the forward
// counterpart — verify complex_type has standard complex layout.
template <typename MapType>
void backward(MapType map, complex_type)
{
  typedef typename MapType::accessor_type accessor_type;
  af::ref<real_type, accessor_type> rmap(
    reinterpret_cast<real_type*>(map.begin()),
    n_real_from_n_complex(map.accessor()));
  backward(rmap, real_type());
}

// Core routine always works on real maps.
// Exact reverse of forward: x pass, then y pass, then complex-to-real
// transforms along z (fast axis).
template <typename MapType>
void backward(MapType map, real_type)
  // FUTURE: move out of class body
{
  // TODO: avoid i, i+1 by casting to complex
  int nx = n_real_[0];
  int ny = n_real_[1];
  int nzc = fft1d_z_.n_complex();
  // One packed complex line (2 reals per element) of the longest axis.
  int seq_size = 2 * std::max(std::max(nx, ny), nzc);
  scitbx::auto_array<real_type> seq_and_scratch;
  // Disable dynamic thread adjustment — presumably so the thread count seen
  // inside the parallel region matches the buffer sized below (TODO confirm).
  if (omp_in_parallel() == 0) omp_set_dynamic(0);
#if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP)
  #pragma omp parallel
#endif
  {
    int num_threads = omp_get_num_threads();
    int i_thread = omp_get_thread_num();
#if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP)
    #pragma omp single
#endif
    {
      // One (seq, scratch) pair per thread, allocated once by one thread;
      // the implicit barrier at the end of "single" publishes it to all.
      seq_and_scratch = scitbx::auto_array<real_type>(
        new real_type[2 * seq_size * num_threads]);
    }
    real_type* seq = seq_and_scratch.get() + 2 * seq_size * i_thread;
    real_type* scratch = seq + seq_size;
#if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP)
    #pragma omp for
#endif
    for (int iz = 0; iz < nzc; iz++) {
      for (int iy = 0; iy < ny; iy++) {
        // Gather the x-line at (*, iy, iz).
        for (int ix = 0; ix < nx; ix++) {
          seq[2*ix] = map(ix, iy, 2*iz);
          seq[2*ix+1] = map(ix, iy, 2*iz+1);
        }
        // Transform along x (slow direction)
        fft1d_x_.transform(select_sign<backward_tag>(), seq, scratch);
        for (int ix = 0; ix < nx; ix++) {
          map(ix, iy, 2*iz) = seq[2*ix];
          map(ix, iy, 2*iz+1) = seq[2*ix+1];
        }
      }
      for (int ix = 0; ix < nx; ix++) {
        // Gather the y-line at (ix, *, iz).
        for (int iy = 0; iy < ny; iy++) {
          seq[2*iy] = map(ix, iy, 2*iz);
          seq[2*iy+1] = map(ix, iy, 2*iz+1);
        }
        // Transform along y (medium direction)
        fft1d_y_.transform(select_sign<backward_tag>(), seq, scratch);
        for (int iy = 0; iy < ny; iy++) {
          map(ix, iy, 2*iz) = seq[2*iy];
          map(ix, iy, 2*iz+1) = seq[2*iy+1];
        }
      }
    }
#if !defined(SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_NO_PRAGMA_OMP)
    #pragma omp for
#endif
    for (int ix = 0; ix < nx; ix++) {
      for (int iy = 0; iy < ny; iy++) {
        // Transform along z (fast direction)
        fft1d_z_.backward(&map(ix, iy, 0), scratch);
      }
    }
  }
}

private:
  af::int3 n_real_;                                      // real-space grid dimensions
  complex_to_complex<real_type, complex_type> fft1d_x_;  // 1-D plan, x axis
  complex_to_complex<real_type, complex_type> fft1d_y_;  // 1-D plan, y axis
  real_to_complex<real_type, complex_type> fft1d_z_;     // 1-D plan, z axis
};

// Build the three 1-D transform objects from the grid dimensions.
template <typename RealType, typename ComplexType>
void real_to_complex_3d<RealType, ComplexType>::init()
{
  fft1d_x_ = complex_to_complex<real_type, complex_type>(n_real_[0]);
  fft1d_y_ = complex_to_complex<real_type, complex_type>(n_real_[1]);
  fft1d_z_ = real_to_complex<real_type, complex_type>(n_real_[2]);
}

}} // namespace scitbx::fftpack

#endif // SCITBX_FFTPACK_REAL_TO_COMPLEX_3D_H
ompArrayCopy.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>

#define ARRAY_LENGTH 1024

/*
 * Demo: fill an array, copy it element-by-element with an OpenMP
 * parallel-for over 4 threads, then print source and destination.
 *
 * Fixes vs. previous version:
 *  - printf used "%li" (signed long) for unsigned long arguments, which is
 *    undefined behavior per the C standard; "%lu" is the matching specifier.
 *  - Typos in user-facing strings ("Coping", "Displaing").
 */
int main(int argc, char const *argv[])
{
    (void)argc;   /* unused */
    (void)argv;   /* unused */

    puts("Welcome!");

    puts("Filling array...");
    unsigned long int arr[ARRAY_LENGTH] = {0};
    for (unsigned long int i = 0; i < ARRAY_LENGTH; ++i)
        arr[i] = i;
    puts("Array filled!");

    puts("Copying array...");
    unsigned long int newArray[ARRAY_LENGTH] = {0};
    omp_set_num_threads(4);
    /* Each iteration writes a distinct element, so no synchronization
       is needed inside the loop. */
    #pragma omp parallel for
    for (unsigned long int i = 0; i < ARRAY_LENGTH; ++i)
        newArray[i] = arr[i];
    puts("array copied!");

    puts("Displaying array...");
    for (unsigned long int i = 0; i < ARRAY_LENGTH; ++i)
        printf("arr[%lu] = %lu -> newArray[%lu] = %lu\n",
               i, arr[i], i, newArray[i]);

    puts("Program finished!");
    return 0;
}
GB_binop__isne_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):      GB_AaddB__isne_fp32
// A.*B function (eWiseMult):    GB_AemultB__isne_fp32
// A*D function (colscale):      GB_AxD__isne_fp32
// D*A function (rowscale):      GB_DxB__isne_fp32
// C+=B function (dense accum):  GB_Cdense_accumB__isne_fp32
// C+=b function (dense accum):  GB_Cdense_accumb__isne_fp32
// C+=A+B function (dense ewise3):    (none)
// C=A+B function (dense ewise3):     GB_Cdense_ewise3_noaccum__isne_fp32
// C=scalar+B     GB_bind1st__isne_fp32
// C=scalar+B'    GB_bind1st_tran__isne_fp32
// C=A+scalar     GB_bind2nd__isne_fp32
// C=A'+scalar    GB_bind2nd_tran__isne_fp32

// C type:   float
// A type:   float
// B,b type: float

// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" is deliberately not a valid identifier: any accidental use of
// this macro fails to compile, since no CBLAS kernel exists for ISNE/FP32.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_FP32 || GxB_NO_ISNE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// disabled for this operator (ISNE is not one of the accumulable ops)
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Each kernel below returns GrB_NO_VALUE when compiled out via GB_DISABLE,
// signalling the caller to fall back to the generic (non-hard-coded) case.
GrB_Info GB_Cdense_ewise3_noaccum__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // slicing of B for parallel execution:
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isne_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the inner block already returned); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    // slicing of A for parallel execution:
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int
ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isne_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    // each Cx [p] depends only on Bx [p], so iterations are independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isne_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    float aij = Ax [pA] ;  \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB_bind1st_tran__isne_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    float aij = Ax [pA] ;  \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB_bind2nd_tran__isne_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif